#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/apic.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}
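
/*
 * Note: the WARN_ON in arch_dup_task_struct() above checks for 16-byte
 * alignment because FXSAVE/FXRSTOR require their save area to be
 * 16-byte aligned; the cache created here guarantees that via the
 * __alignof__(union thread_xstate) argument.
 */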

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
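
/*
 * A note on the TS_POLLING handshake in default_idle(): the scheduler's
 * wakeup path skips the cross-CPU IPI for CPUs that poll need_resched.
 * Clearing TS_POLLING before testing need_resched(), with the smp_mb()
 * in between, guarantees that a remote resched request is either seen
 * here or followed by an IPI that breaks the CPU out of hlt.
 */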

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
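
/*
 * Illustrative usage (the handler name below is a placeholder, not a
 * symbol defined in this file):
 *
 *      pm_idle = my_idle_handler;
 *      cpu_idle_wait();
 *
 * Once cpu_idle_wait() returns, no CPU can still be executing through
 * the previous pm_idle pointer.
 */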

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax >> 4) + 1);
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}
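
/*
 * Callers such as the ACPI C-state driver pass the MWAIT hint for the
 * target C-state in ax and the break-on-interrupt flag in cx. The
 * (ax >> 4) + 1 in the trace call recovers the C-state number from the
 * hint's upper nibble, matching the hint encoding where bits 7:4 name
 * the target C-state minus one.
 */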

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;
        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
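
/*
 * Background for these constants: CPUID leaf 0x05 describes
 * MONITOR/MWAIT. ECX bit 0 advertises the extensions enumerated in
 * EDX, where each 4-bit field gives the number of MWAIT sub-states
 * supported for one C-state; bits 7:4 belong to C1, hence the 0xf0
 * mask tested in mwait_usable() below.
 */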

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        /* "idle=mwait" on the command line forces MWAIT */
        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT
         */
        return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which have potentially C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0f)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}

static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        cpu_clear(cpu, c1e_mask);
}
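
/*
 * c1e_remove_cpu() is presumably called from the CPU offline path (its
 * callers live outside this file), so that a CPU going through a
 * hotplug cycle re-arms the broadcast switch in c1e_idle() when it
 * comes back online.
 */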

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpu_isset(cpu, c1e_mask)) {
                        cpu_set(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_function_call.
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
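
/*
 * The broadcast handshake above mirrors what ACPI does for C3: since
 * the local APIC timer stops in C1E, a global clock event device has
 * to provide ticks while the CPU halts, and the CPU leaves broadcast
 * mode again once it wakes up.
 */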

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => All CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
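
/*
 * Summary of the resulting boot options, as implemented by idle_setup()
 * above: "idle=poll" busy-waits in poll_idle(), "idle=mwait" forces
 * MWAIT even where the C1E logic would avoid it, "idle=halt" pins
 * pm_idle to default_idle(), and "idle=nomwait" disables MWAIT for CPU
 * C-states. Only poll and mwait set boot_option_idle_override; halt and
 * nomwait return early so the CPU idle driver can still load.
 */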