/*
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"
cpumask_t xen_cpu_initialized_map;

/* IRQs bound to the per-cpu IPI event channels; -1 means "not bound",
   which the error path in xen_smp_intr_init() relies on. */
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, callfunc_irq) = -1;
static DEFINE_PER_CPU(int, callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif

	return IRQ_HANDLED;
}
static __cpuinit void cpu_bringup_and_idle(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	preempt_disable();

	xen_enable_sysenter();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	cpu_set(cpu, cpu_online_map);
	x86_write_percpu(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
	cpu_idle();
}
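/*
 * Bind this CPU's IPIs and its debug interrupt to Xen event channels.
 * A Xen guest has no APIC to send IPIs through, so each IPI class gets
 * its own per-cpu event channel instead; the kasprintf'd names are what
 * identify them in /proc/interrupts.  On failure, unwind whatever was
 * already bound.
 */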
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

	return rc;
}
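/*
 * Ask the hypervisor which VCPUs exist.  VCPUOP_is_up returns >= 0 for
 * any VCPU the domain has been given, whether or not it is running yet,
 * so every such CPU is marked possible.
 */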
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			cpu_set(i, cpu_possible_map);
		}
	}
}
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));

	xen_setup_vcpu_info_placement();
}
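/*
 * Prepare for SMP bringup: record the boot CPU's data, bind its
 * interrupts, trim cpu_possible_map down to max_cpus, and fork an idle
 * thread for every other possible CPU.
 */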
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}
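/*
 * Build the initial register state for a new VCPU and hand it to the
 * hypervisor.  This is the Xen analogue of the native trampoline:
 * segment registers, the entry point (cpu_bringup_and_idle), the kernel
 * stack, the GDT frame (as a machine frame number), event callbacks and
 * cr3 are all described in a vcpu_guest_context and installed with
 * VCPUOP_initialise.
 */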
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_cs = __KERNEL_CS;

	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
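/*
 * Bring one secondary CPU up.  Unlike native bringup there is no
 * INIT/SIPI dance: the new VCPU is given its initial context, started
 * with VCPUOP_up, and then we spin yielding until cpu_bringup_and_idle
 * marks it CPU_ONLINE.
 */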
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	WARN_ON(cpu == 0);
	if (cpu > 0) {
		rc = get_local_pda(cpu);
		if (rc)
			return rc;
	}
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}
static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
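/*
 * Run on each of the other CPUs via smp_call_function: switch away from
 * any task page tables and take this VCPU offline with VCPUOP_down.
 */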
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}
static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}
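/*
 * A VCPU that the hypervisor has preempted can't service the
 * call-function IPI, so after sending it, yield once if any VCPU in the
 * mask has had its time stolen, to give it a chance to run.
 */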
static void xen_smp_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu_mask(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
		}
	}
}
static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
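/*
 * Event-channel upcalls for the two call-function IPI classes.  The
 * real work happens in the generic smp helpers; these just wrap them in
 * irq_enter()/irq_exit() and bump the per-cpu interrupt counters.
 */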
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}