* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/bootinfo.h>
+#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#define TICK_SIZE (tick_nsec / 1000)
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
/*
* forward reference
*/
static unsigned int sll32_usecs_per_cycle;
/* how many counter cycles in a jiffy */
-static unsigned long cycles_per_jiffy;
+static unsigned long cycles_per_jiffy __read_mostly;
/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi, timerlo;
return 0;
}
-static void null_hpt_init(unsigned int count) { /* nothing */ }
+static void null_hpt_init(unsigned int count)
+{
+ /* nothing */
+}
/*
{
unsigned int count;
+#ifndef CONFIG_SOC_PNX8550 /* pnx8550 resets to zero */
/* Ack this timer interrupt and set the next one. */
expirelo += cycles_per_jiffy;
+#endif
write_c0_compare(expirelo);
/* Check to see if we have missed any timer interrupts. */
unsigned long seq;
unsigned long lost;
unsigned long usec, sec;
- unsigned long max_ntp_tick = tick_usec - tickadj;
+ unsigned long max_ntp_tick;
do {
seq = read_seqbegin(&xtime_lock);
* Better to lose some accuracy than have time go backwards..
*/
if (unlikely(time_adjust < 0)) {
+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
usec = min(usec, max_ntp_tick);
if (lost)
usec += lost * max_ntp_tick;
} else if (unlikely(lost))
- usec += lost * tick_usec;
+ usec += lost * (USEC_PER_SEC / HZ);
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
-
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
unsigned long j;
unsigned int count;
+ write_seqlock(&xtime_lock);
+
count = mips_hpt_read();
mips_timer_ack();
* CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- write_seqlock(&xtime_lock);
if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
last_rtc_update = xtime.tv_sec - 600;
}
}
- write_sequnlock(&xtime_lock);
/*
* If jiffies has overflown in this timer_interrupt, we must
}
}
+ write_sequnlock(&xtime_lock);
+
/*
* In UP mode, we call local_timer_interrupt() to do profiling
 * and process accounting.
return IRQ_HANDLED;
}
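+/*
+ * Performance counter interrupt hook.  null_perf_irq() is the default,
+ * do-nothing handler; a profiling driver may install its own handler by
+ * overwriting the exported perf_irq pointer.
+ */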
+int null_perf_irq(struct pt_regs *regs)
+{
+ return 0;
+}
+
+int (*perf_irq)(struct pt_regs *regs) = null_perf_irq;
+
+EXPORT_SYMBOL(null_perf_irq);
+EXPORT_SYMBOL(perf_irq);
+
asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
{
+ int r2 = cpu_has_mips_r2;
+
irq_enter();
kstat_this_cpu.irqs[irq]++;
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run the
+ * performance counter interrupt handler anyway.
+ */
+	if (!r2 || (read_c0_cause() & (1 << 26)))	/* Cause.PCI: perf counter interrupt pending */
+ if (perf_irq(regs))
+ goto out;
+
/* we keep interrupt disabled all the time */
- timer_interrupt(irq, NULL, regs);
+	if (!r2 || (read_c0_cause() & (1 << 30)))	/* Cause.TI: timer interrupt pending */
+ timer_interrupt(irq, NULL, regs);
+out:
irq_exit();
}
mips_hpt_init = c0_hpt_init;
}
- if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) ||
- (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
- (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
+ if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
+ (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
+ (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
/*
* We need to calibrate the counter but we don't have
* 64-bit division.