/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

/* Per-cpu irq bindings; -1 means "not bound", which the error path
   in xen_smp_intr_init() relies on. */
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, callfunc_irq) = -1;
static DEFINE_PER_CPU(int, callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.  Nothing to do; all the work is done
 * automatically when we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);

        return IRQ_HANDLED;
}

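/*
 * First code run by a freshly started vcpu: cpu_initialize_context()
 * below points the new vcpu's entry point (user_regs.eip) at
 * cpu_bringup_and_idle(), which finishes per-cpu setup and then drops
 * into the idle loop.
 */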
static __cpuinit void cpu_bringup(void)
{
        int cpu = smp_processor_id();

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        cpu_set(cpu, cpu_online_map);
        percpu_write(cpu_state, CPU_ONLINE);
        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static __cpuinit void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}

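/*
 * Bind this cpu's IPIs and the debug VIRQ to Linux irq handlers.
 * Under Xen, IPI "vectors" are event channels rather than APIC
 * vectors, hence bind_ipi_to_irqhandler() instead of APIC setup.  On
 * failure, anything already bound is torn down again.
 */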
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfuncsingle_irq, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        if (per_cpu(debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        if (per_cpu(callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

        return rc;
}

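/*
 * Probe the hypervisor for vcpus.  Every vcpu for which the
 * VCPUOP_is_up hypercall succeeds (rc >= 0) exists in this domain,
 * whether or not it is currently running, so mark it possible.
 */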
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        cpu_set(i, cpu_possible_map);
                }
        }
}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled. */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_setup_vcpu_info_placement();
}

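/*
 * Set up the boot cpu's data and interrupts, trim the possible map
 * down to max_cpus, and fork an idle thread for every other cpu we
 * may bring up later.
 */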
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;

        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_possible_cpu(cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                cpu_set(cpu, cpu_present_map);
        }
}

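/*
 * Build the initial register state, GDT frame, callbacks and cr3 for
 * a secondary vcpu and hand it to Xen with VCPUOP_initialise.  Called
 * once per cpu; later calls for an already-initialized cpu are no-ops.
 */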
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
        make_lowmem_page_readonly(gdt);

        ctxt->gdt_frames[0] = virt_to_mfn(gdt);
        ctxt->gdt_ents      = GDT_ENTRIES;

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs     = __KERNEL_CS;
        ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
        ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

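/*
 * Bring one cpu online: set up its idle task, timer, spinlock and IPI
 * state, initialize its context, then tell Xen to run it with
 * VCPUOP_up.
 */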
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
        struct task_struct *idle = idle_task(cpu);
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        /* Wait for the new vcpu to mark itself online, yielding to
           the hypervisor rather than spinning. */
        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                barrier();
        }

        return 0;
}


static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

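/*
 * Tear down a dead cpu: wait for Xen to actually take the vcpu down,
 * then unbind its IPIs and release its spinlock and timer state.
 */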
static void xen_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);

        if (num_online_cpus() == 1)
                alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with CONFIG_HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
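
/*
 * Park the calling cpu: switch to swapper_pg_dir so nothing stays
 * pinned, then take the vcpu down.  Run on every other cpu by
 * xen_smp_send_stop(), e.g. on shutdown or panic.
 */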
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_smp_send_stop(void)
{
        smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

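/*
 * Deliver an IPI "vector" to each online cpu in the mask, one event
 * channel notification at a time.
 */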
static void xen_send_IPI_mask(const struct cpumask *mask,
                              enum ipi_vector vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        xen_send_IPI_mask(cpumask_of(cpu),
                          XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

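/*
 * The Xen replacements for the native smp_ops, installed by
 * xen_smp_init() below.
 */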
static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}