/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as base and the unstable clock deltas. The deltas are filtered,
 * making it monotonic and keeping it within an expected window. This window
 * is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 1 jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
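/*
 * Note: the fallback above only advances in jiffy-sized steps of
 * NSEC_PER_SEC / HZ nanoseconds (1,000,000 ns at HZ=1000), so architectures
 * with a fine-grained cycle counter are expected to override it.
 */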
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
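/*
 * The per-cpu multiplier is a fixed-point ratio of gtod time to raw
 * sched_clock() time, with MULTI_SHIFT fractional bits: 1 << MULTI_SHIFT
 * means "the raw clock runs at gtod speed", MAX_MULTI caps the correction
 * at 2x and MIN_MULTI at 0.5x.
 */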
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;
	u64			prev_raw;
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
	s64			multi;
#ifdef CONFIG_NO_HZ
	int			check_max;
#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}
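/*
 * Set once sched_clock_init() has seeded every cpu's data; until then
 * sched_clock_cpu() bails out early.
 */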
static __read_mostly int sched_clock_running;
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
		scd->multi = 1 << MULTI_SHIFT;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}
#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the jiffies delta inaccurate, which
 * prevents us from checking the maximum time update.
 * Disable the maximum check during stopped ticks.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
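/*
 * When @time is NULL the clamped result is committed to scd->clock (and
 * scd->prev_raw is advanced); when it is non-NULL the value is only written
 * to *time, so a remote cpu's state is never updated from our raw clock.
 */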
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * At the schedule tick the clock can be just under the gtod. We don't
	 * want to push it too prematurely.
	 */
	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
	if (min_clock > TICK_NSEC)
		min_clock -= TICK_NSEC / 2;
	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
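	/*
	 * Scale the raw delta by the fixed-point gtod/raw ratio computed at
	 * the last tick, so a fast or slow raw clock is corrected towards
	 * gtod speed.
	 */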
	delta *= scd->multi;
	delta >>= MULTI_SHIFT;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	if (time)
		*time = clock;
	else {
		scd->prev_raw = now;
		scd->clock = clock;
	}
}
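/*
 * Take both per-cpu clock locks in a fixed (address) order so that two cpus
 * reading each other's clock cannot deadlock.
 */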
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time rebase it against:
		 *   tick_raw	(offset between raw counters)
		 *   tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;
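		/*
		 * 'now' has been rebased into the remote cpu's raw clock
		 * domain and can be clamped against that cpu's window below.
		 */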
		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;
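	/*
	 * Recompute the gtod/raw ratio for the next interval: multi is
	 * delta_gtod / delta_raw in MULTI_SHIFT fixed point, clamped to
	 * [MIN_MULTI, MAX_MULTI]; if no raw time elapsed, fall back to 1.0.
	 */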
	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * current timestamp:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	scd->multi = 1 << MULTI_SHIFT;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
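/*
 * The irq-safe external interface: disable interrupts around
 * sched_clock_cpu() so this can be called from any context.
 */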
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);