/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

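/* vcpus whose initial context has already been handed to the hypervisor */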
cpumask_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif
	return IRQ_HANDLED;
}

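/*
 * First C code run by a freshly started vcpu (its initial eip points
 * here): finish per-cpu setup, mark this CPU online, then idle.
 */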
static __cpuinit void cpu_bringup_and_idle(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	preempt_disable();

	xen_enable_sysenter();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	cpu_set(cpu, cpu_online_map);
	x86_write_percpu(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
	cpu_idle();
}

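/*
 * Bind the per-cpu IPI event channels (reschedule, call-function,
 * call-function-single) and the debug VIRQ for this cpu.  On any
 * failure, unbind whatever was already set up and return the error.
 */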
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

	return rc;
}

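/*
 * Ask the hypervisor which vcpus exist: VCPUOP_is_up returns >= 0
 * for any vcpu that exists, whether or not it is currently running.
 */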
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0)
			cpu_set(i, cpu_possible_map);
	}
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));

	xen_setup_vcpu_info_placement();
}

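/*
 * Prepare for bringing up secondary CPUs: finish boot-CPU setup, trim
 * cpu_possible_map down to max_cpus and fork an idle task for every
 * remaining CPU.
 */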
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}

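/*
 * Build the initial register state, GDT, callbacks and pagetable base
 * for a new vcpu and hand them to Xen with the VCPUOP_initialise
 * hypercall.  Runs once per cpu; repeat calls are no-ops.
 */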
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

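/*
 * Boot a secondary CPU: point its pda/current at the idle task, set up
 * its timer and event channels, then kick the vcpu with VCPUOP_up and
 * spin until cpu_bringup_and_idle() marks it CPU_ONLINE.
 */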
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	WARN_ON(cpu == 0);
	if (cpu > 0) {
		rc = get_local_pda(cpu);
		if (rc)
			return rc;
	}
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

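/*
 * Run on each CPU by xen_smp_send_stop(): drop anything that would
 * pin the vcpu, then take it down with VCPUOP_down.
 */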
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

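/* Send the given IPI vector to every online cpu in the mask. */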
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu_mask(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

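/*
 * Call-function IPI handlers: let the generic smp code run the queued
 * function(s), bumping the irq_call_count statistic on the way.
 */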
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}

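/* Plug the Xen implementations into the x86 smp_ops table. */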
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}