Patches to support SMP:
* Each CPU has its own current_pgd (per-CPU pattern sketched below).
* flush_tlb_range is implemented as flush_tlb_mm.
* Atomic operations are implemented with spinlocks on SMP (sketched
  below); the UP build falls back to disabling interrupts.
* Semaphores are implemented with the same spinlock-backed atomic hooks.
Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
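---

The first bullet relies on the kernel's per-CPU variable machinery. As a
minimal sketch of the pattern (the DEFINE_PER_CPU placement in
arch/cris/mm/fault.c is implied by the mmu_context.h hunk below rather
than shown in these hunks, so treat its exact location as an assumption):

        #include <linux/percpu.h>
        #include <linux/smp.h>
        #include <asm/pgtable.h>

        /* arch/cris/mm/fault.c (assumed): one page-directory pointer per CPU */
        DEFINE_PER_CPU(pgd_t *, current_pgd);

        /* Users such as the TLB-miss handler and switch_mm() always
         * fetch the calling CPU's private copy:
         */
        pgd_t *pgd = per_cpu(current_pgd, smp_processor_id());

Keeping the pointer per-CPU means switch_mm() on one CPU can no longer
clobber the pgd that another CPU's TLB-miss handler is walking.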
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/arch/svinto.h>
+#include <asm/mmu_context.h>
 
 /* debug of low-level TLB reload */
 #undef DEBUG
 #define D(x)
 #endif
 
-extern volatile pgd_t *current_pgd;
-
 extern const struct exception_table_entry
        *search_exception_tables(unsigned long addr);
 
        int page_id;
        int acc, inv;
 #endif
-       pgd_t* pgd = (pgd_t*)current_pgd;
+       pgd_t *pgd = per_cpu(current_pgd, smp_processor_id());
        pmd_t *pmd;
        pte_t pte;
        int miss, we, writeac;
        *R_TLB_LO = pte_val(pte);
        local_irq_restore(flags);
 }
-
-/* Called from arch/cris/mm/fault.c to find fixup code. */
-int
-find_fixup_code(struct pt_regs *regs)
-{
-       const struct exception_table_entry *fixup;
-
-       if ((fixup = search_exception_tables(regs->irp)) != 0) {
-               /* Adjust the instruction pointer in the stackframe. */
-               regs->irp = fixup->fixup;
-               
-               /* 
-                * Don't return by restoring the CPU state, so switch
-                * frame-type. 
-                */
-               regs->frametype = CRIS_FRAME_NORMAL;
-               return 1;
-       }
-
-       return 0;
-}
 
         *  switch_mm)
         */
 
-       current_pgd = init_mm.pgd;
+       per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
 
        /* initialise the TLB (tlb.c) */
 
 
        local_irq_restore(flags);
 }
 
-/* invalidate a page range */
-
-void
-flush_tlb_range(struct vm_area_struct *vma, 
-               unsigned long start,
-               unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       int page_id = mm->context.page_id;
-       int i;
-       unsigned long flags;
-
-       D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
-                start, end, page_id, mm));
-
-       if(page_id == NO_CONTEXT)
-               return;
-
-       start &= PAGE_MASK;  /* probably not necessary */
-       end &= PAGE_MASK;    /* dito */
-
-       /* invalidate those TLB entries that match both the mm context
-        * and the virtual address range
-        */
-
-       local_save_flags(flags);
-       local_irq_disable();
-       for(i = 0; i < NUM_TLB_ENTRIES; i++) {
-               unsigned long tlb_hi, vpn;
-               *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
-               tlb_hi = *R_TLB_HI;
-               vpn = tlb_hi & PAGE_MASK;
-               if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
-                   vpn >= start && vpn < end) {
-                       *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
-                                     IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
-                       
-                       *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
-                                     IO_STATE(R_TLB_LO, valid, no  ) |
-                                     IO_STATE(R_TLB_LO, kernel,no  ) |
-                                     IO_STATE(R_TLB_LO, we,    no  ) |
-                                     IO_FIELD(R_TLB_LO, pfn,   0   ) );
-               }
-       }
-       local_irq_restore(flags);
-}
-
 /* dump the entire TLB for debug purposes */
 
 #if 0
         * the pgd.
         */
 
-       current_pgd = next->pgd;
+       per_cpu(current_pgd, smp_processor_id()) = next->pgd;
 
        /* switch context in the MMU */
        
 
--- /dev/null
+#ifndef __ASM_CRIS_ARCH_ATOMIC__
+#define __ASM_CRIS_ARCH_ATOMIC__
+
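+/* UP: masking interrupts on the local CPU is enough. addr is unused
+ * here, but lets an SMP implementation pick a lock from the address.
+ */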
+#define cris_atomic_save(addr, flags) local_irq_save(flags)
+#define cris_atomic_restore(addr, flags) local_irq_restore(flags)
+
+#endif
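
The third bullet of the commit message implies an SMP build backs these
same hooks with spinlocks. The sketch below is only an assumption of what
such a variant could look like; cris_atomic_locks, ATOMIC_HASH_SIZE and
ATOMIC_HASH are illustrative names, not shown in these hunks. Hashing the
atomic_t address across a small lock array keeps unrelated atomics from
contending on one global lock:

        #include <linux/spinlock.h>

        extern spinlock_t cris_atomic_locks[];  /* assumed lock array */
        #define ATOMIC_HASH_SIZE 4
        #define ATOMIC_HASH(addr) \
                (&cris_atomic_locks[(unsigned long)(addr) & (ATOMIC_HASH_SIZE - 1)])

        /* addr now selects a lock, which is why the UP macros keep the
         * (otherwise unused) addr parameter in their signature.
         */
        #define cris_atomic_save(addr, flags) \
                spin_lock_irqsave(ATOMIC_HASH(addr), flags)
        #define cris_atomic_restore(addr, flags) \
                spin_unlock_irqrestore(ATOMIC_HASH(addr), flags)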
 
 #define __ASM_CRIS_ATOMIC__
 
 #include <asm/system.h>
+#include <asm/arch/atomic.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-typedef struct { int counter; } atomic_t;
+typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)  { (i) }
 
 extern __inline__ void atomic_add(int i, volatile atomic_t *v)
 {
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        v->counter += i;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
 {
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        v->counter -= i;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
 {
        unsigned long flags;
        int retval;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = (v->counter += i);
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 {
        unsigned long flags;
        int retval;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = (v->counter -= i);
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 {
        int retval;
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = (v->counter -= i) == 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 extern __inline__ void atomic_inc(volatile atomic_t *v)
 {
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        (v->counter)++;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_dec(volatile atomic_t *v)
 {
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        (v->counter)--;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_inc_return(volatile atomic_t *v)
 {
        unsigned long flags;
        int retval;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = ++(v->counter);
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 {
        unsigned long flags;
        int retval;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = --(v->counter);
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
 {
        int retval;
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = --(v->counter) == 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 {
        int retval;
        unsigned long flags;
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(v, flags);
        retval = ++(v->counter) == 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(v, flags);
        return retval;
 }
 
 
  * registers like cr3 on the i386
  */
 
-extern volatile pgd_t *current_pgd;   /* defined in arch/cris/mm/fault.c */
+DECLARE_PER_CPU(pgd_t *, current_pgd); /* defined in arch/cris/mm/fault.c */
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 
        might_sleep();
 
        /* atomically decrement the semaphore's count, and if it's negative, we wait */
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(sem, flags);
        failed = --(sem->count.counter) < 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(sem, flags);
        if(failed) {
                __down(sem);
        }
        might_sleep();
 
        /* atomically decrement the semaphore's count, and if it's negative, we wait */
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(sem, flags);
        failed = --(sem->count.counter) < 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(sem, flags);
        if(failed)
                failed = __down_interruptible(sem);
        return(failed);
        unsigned long flags;
        int failed;
 
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(sem, flags);
        failed = --(sem->count.counter) < 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(sem, flags);
        if(failed)
                failed = __down_trylock(sem);
        return(failed);
 }
 
 /*
        int wakeup;
 
        /* atomically increment the semaphore's count, and if it was negative, we wake people */
-       local_save_flags(flags);
-       local_irq_disable();
+       cris_atomic_save(sem, flags);
        wakeup = ++(sem->count.counter) <= 0;
-       local_irq_restore(flags);
+       cris_atomic_restore(sem, flags);
        if(wakeup) {
                __up(sem);
        }
 
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/cpumask.h>
+
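+/* Physically present CPUs; doubles as the possible map on this port. */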
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible_map phys_cpu_present_map
+
+#define __smp_processor_id() (current_thread_info()->cpu)
+
 #endif
 
--- /dev/null
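+/* Defer entirely to the arch-specific spinlock implementation. */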
+#include <asm/arch/spinlock.h>
 
  *
  */
 
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(struct mm_struct *mm);
+extern void __flush_tlb_page(struct vm_area_struct *vma,
+                          unsigned long addr);
+
+#ifdef CONFIG_SMP
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, 
                           unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-                           unsigned long start,
-                           unsigned long end);
+#else
+#define flush_tlb_all __flush_tlb_all
+#define flush_tlb_mm __flush_tlb_mm
+#define flush_tlb_page __flush_tlb_page
+#endif
+
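+/*
+ * Flushing the whole mm is a correct superset of flushing the range:
+ * it can only over-invalidate, never miss an entry in [start, end).
+ */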
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
 
 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                       unsigned long start, unsigned long end)