#include <asm/delay.h>		/* udelay(), used by the IPI wait loops below */
#include <asm/arch/irq.h>
#include <asm/arch/hwregs/intr_vect.h>
#include <asm/arch/hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4

#define FLUSH_ALL (void*)0xffffffff
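/* FLUSH_ALL is a sentinel passed in the mm/vma arguments of
 * flush_tlb_common() to mean "flush everything". It is only ever
 * compared against, never dereferenced; the all-ones value is assumed
 * never to match a real mm_struct or vm_area_struct pointer. */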
/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
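/* The CRIS ISA has no atomic read-modify-write instructions, so on SMP
 * the port's atomic_t and bitops helpers bracket each operation with one
 * of these locks (selected, as far as the <asm/atomic.h> helpers are
 * concerned, by a hash of the operand's address). */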
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);
/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct * call_data;
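/* call_data points into the stack frame of the CPU currently executing
 * smp_call_function(); call_lock serializes callers so remote CPUs
 * always see a consistent func/info/wait triple. */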
static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;

extern int setup_irq(int, struct irqaction *);
/* Interrupt controller register block for each core
 * (regi_irq for the boot CPU, regi_irq2 for the second core). */
static unsigned long irq_regs[NR_CPUS] =
{
	regi_irq,
	regi_irq2
};
static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,
	.mask = CPU_MASK_NONE,
	.name = "ipi"
};
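/* The IPI handler runs with local interrupts off (IRQF_DISABLED) and
 * must be quick: a sender that asked to wait in send_ipi() spins until
 * this CPU clears the vector field of its rw_ipi register. */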
extern void cris_mmu_init(void);
extern void cris_timer_init(void);
/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, phys_cpu_present_map);
}
void __devinit smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());

	/* Write the new PGD pointer to both MMU banks. */
	SUPP_BANK_SEL(1);	/* instruction MMU */
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);	/* data MMU */
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	cpu_set(0, cpu_online_map);
	cpu_set(0, phys_cpu_present_map);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}

		udelay(100);
		barrier();
	}

	/* The CPU never came up; reclaim the idle task forked for it. */
	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}
/* Secondary CPUs start using C here. Here we need to set up CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	unmask_irq(IPI_INTR_VECT);
	unmask_irq(TIMER_INTR_VECT);
	preempt_disable();
	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}
/* Stop execution on this CPU.*/
void stop_this_cpu(void* dummy)
{
	local_irq_disable();
	asm volatile("halt");
}
/* Other calls */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;
int __cpuinit __cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask = CPU_MASK_NONE;
	cpu_set(cpu, cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}
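/* Note that crisv32_ipi_interrupt() never tests IPI_SCHEDULE explicitly:
 * merely taking the interrupt is enough, as the target CPU goes through
 * the usual need_resched check on its way back out of the handler. */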
/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has been executed on.
 */
void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
	cpu_clear(smp_processor_id(), cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}
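/* The flush_mm/flush_vma/flush_addr statics are only meaningful while
 * tlbstate_lock is held. send_ipi() is called with wait=1, so the sender
 * spins until every target CPU has completed its flush before releasing
 * the lock and letting the statics be overwritten. */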
void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpus_clear(mm->cpu_vm_mask);
	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
}
void flush_tlb_page(struct vm_area_struct *vma,
		    unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}
/* Inter processor interrupts
 *
 * The IPIs are used for:
 *   * Forcing a schedule on a CPU
 *   * Flushing the TLB on other CPUs
 *   * Calling a function on other CPUs
 */
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i;
	reg_intr_vect_rw_ipi ipi;
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Send the IPI. Read-modify-write each target's own rw_ipi
	 * register so that a vector already pending on that target is
	 * preserved, instead of OR-ing the new vector into a value read
	 * once from CPU 0's register. */
	for_each_cpu_mask(i, cpu_mask)
	{
		ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for IPI to finish on other CPUS */
	if (wait) {
		for_each_cpu_mask(i, cpu_mask) {
			int j;
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}
/*
 * Run a function on all other online CPUs.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	cpumask_t cpu_mask = CPU_MASK_ALL;
	struct call_data_struct data;
	int ret;

	cpu_clear(smp_processor_id(), cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
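/* The "no disabled interrupts" rule above prevents a deadlock: a CPU
 * that waits in send_ipi() while unable to service incoming IPI_CALL
 * interrupts itself would spin forever if another CPU were concurrently
 * doing the same thing in the opposite direction. */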
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_CALL) {
		/* Read func/info only here, when an IPI_CALL is actually
		 * pending and call_data is therefore known to be valid. */
		void (*func) (void *info) = call_data->func;
		void *info = call_data->info;

		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	/* Acknowledge the IPI: clearing the vector field lets a sender
	 * waiting in send_ipi() proceed. */
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}