/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
DEFINE_SPINLOCK(tick_device_lock);
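/*
 * Note: tick_do_timer_cpu nominates the single CPU that advances
 * jiffies and the wall clock in tick_periodic(). TICK_DO_TIMER_BOOT
 * means the duty has not been claimed yet; the first CPU to set up a
 * tick device takes it (see tick_setup_device() below). tick_period
 * is the tick interval, NSEC_PER_SEC / HZ.
 */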
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
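/*
 * In tick_periodic() above, only tick_do_timer_cpu advances jiffies
 * via do_timer(1) under xtime_lock; every CPU, including that one,
 * still does its own process accounting and profiling.
 */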
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}
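/*
 * Illustrative sketch (not part of this file): drivers do not call
 * tick_handle_periodic() directly. A driver registers a
 * clock_event_device and the tick layer installs the handler via
 * tick_set_periodic_handler(). The mydev_* names below are
 * hypothetical:
 *
 *	static struct clock_event_device mydev_clockevent = {
 *		.name		= "mydev",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_mode	= mydev_set_mode,
 *		.set_next_event	= mydev_set_next_event,
 *	};
 *
 *	mydev_clockevent.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_register_device(&mydev_clockevent);
 *
 * Registration raises CLOCK_EVT_NOTIFY_ADD, which ends up in
 * tick_check_new_device() below.
 */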
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
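/*
 * The programming loop above (and the one in tick_handle_periodic())
 * retries because clockevents_program_event() fails when the
 * requested expiry is already in the past; the loop keeps adding
 * tick_period until the event can be armed in the future.
 */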
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
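/*
 * Handover detail: when an existing tick device is replaced, the old
 * device's event_handler is parked on clockevents_handle_noop() first,
 * so a late interrupt from the old hardware is harmless, and the saved
 * handler/next_event pair is transplanted onto the new device in the
 * oneshot case.
 */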
/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/* Prefer one shot capable devices ! */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/* Check the rating */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
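/*
 * tick_check_new_device() returns NOTIFY_STOP when the new device has
 * been adopted, either as this cpu's tick device or as the broadcast
 * device, and NOTIFY_OK otherwise.
 */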
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	/* Transfer the do_timer job away from this cpu */
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}
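/*
 * If no other online CPU is found, the duty is parked as
 * TICK_DO_TIMER_NONE; a CPU whose tick handler runs later can pick it
 * up again (the NOHZ code in tick-sched.c checks for this).
 */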
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}
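/*
 * On resume, the per-cpu device is only reprogrammed here when the
 * broadcast device did not take over the job; tick_resume_broadcast()
 * returns non-zero when events are delivered via broadcast instead.
 */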
/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
		       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};
/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}
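/*
 * tick_init() is invoked early during boot (from start_kernel() in
 * kernels of this era), so the notifier is already in place when the
 * first clock event device registers.
 */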