www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge ../linux-2.6 by hand
author Paul Mackerras <paulus@samba.org>
Mon, 31 Oct 2005 02:37:12 +0000 (13:37 +1100)
committer Paul Mackerras <paulus@samba.org>
Mon, 31 Oct 2005 02:37:12 +0000 (13:37 +1100)
15 files changed:
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable_64.c
arch/ppc/kernel/time.c
arch/ppc/platforms/hdpu.c
drivers/char/mem.c
drivers/char/viotape.c
drivers/net/bmac.c
drivers/net/ibmveth.c
drivers/net/mace.c
drivers/pcmcia/Makefile
drivers/video/fbmem.c
include/asm-powerpc/rwsem.h
include/asm-powerpc/semaphore.h
include/asm-powerpc/unistd.h
include/asm-ppc64/pgtable.h

diff --combined arch/powerpc/mm/mem.c
index e43e8ef7008812f817ba29fb81a1bf1ba30202f4,0000000000000000000000000000000000000000..117b00012e144b0221fe1bd254a0e291571cfcc6
mode 100644,000000..100644
--- /dev/null
@@@ -1,484 -1,0 +1,564 @@@
 +/*
 + *  PowerPC version
 + *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 + *
 + *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 + *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 + *    Copyright (C) 1996 Paul Mackerras
 + *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 + *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 + *
 + *  Derived from "arch/i386/mm/init.c"
 + *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 + *
 + *  This program is free software; you can redistribute it and/or
 + *  modify it under the terms of the GNU General Public License
 + *  as published by the Free Software Foundation; either version
 + *  2 of the License, or (at your option) any later version.
 + *
 + */
 +
 +#include <linux/config.h>
 +#include <linux/module.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
 +#include <linux/errno.h>
 +#include <linux/string.h>
 +#include <linux/types.h>
 +#include <linux/mm.h>
 +#include <linux/stddef.h>
 +#include <linux/init.h>
 +#include <linux/bootmem.h>
 +#include <linux/highmem.h>
 +#include <linux/initrd.h>
 +#include <linux/pagemap.h>
 +
 +#include <asm/pgalloc.h>
 +#include <asm/prom.h>
 +#include <asm/io.h>
 +#include <asm/mmu_context.h>
 +#include <asm/pgtable.h>
 +#include <asm/mmu.h>
 +#include <asm/smp.h>
 +#include <asm/machdep.h>
 +#include <asm/btext.h>
 +#include <asm/tlb.h>
 +#include <asm/prom.h>
 +#include <asm/lmb.h>
 +#include <asm/sections.h>
 +#ifdef CONFIG_PPC64
 +#include <asm/vdso.h>
 +#endif
 +
 +#include "mmu_decl.h"
 +
 +#ifndef CPU_FTR_COHERENT_ICACHE
 +#define CPU_FTR_COHERENT_ICACHE       0       /* XXX for now */
 +#define CPU_FTR_NOEXECUTE     0
 +#endif
 +
 +int init_bootmem_done;
 +int mem_init_done;
 +unsigned long memory_limit;
 +
 +/*
 + * This is called by /dev/mem to know if a given address has to
 + * be mapped non-cacheable or not
 + */
 +int page_is_ram(unsigned long pfn)
 +{
 +      unsigned long paddr = (pfn << PAGE_SHIFT);
 +
 +#ifndef CONFIG_PPC64  /* XXX for now */
 +      return paddr < __pa(high_memory);
 +#else
 +      int i;
 +      for (i=0; i < lmb.memory.cnt; i++) {
 +              unsigned long base;
 +
 +              base = lmb.memory.region[i].base;
 +
 +              if ((paddr >= base) &&
 +                      (paddr < (base + lmb.memory.region[i].size))) {
 +                      return 1;
 +              }
 +      }
 +
 +      return 0;
 +#endif
 +}
 +EXPORT_SYMBOL(page_is_ram);
 +
 +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 +                            unsigned long size, pgprot_t vma_prot)
 +{
 +      if (ppc_md.phys_mem_access_prot)
 +              return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
 +
 +      if (!page_is_ram(pfn))
 +              vma_prot = __pgprot(pgprot_val(vma_prot)
 +                                  | _PAGE_GUARDED | _PAGE_NO_CACHE);
 +      return vma_prot;
 +}
 +EXPORT_SYMBOL(phys_mem_access_prot);
 +
++#ifdef CONFIG_MEMORY_HOTPLUG
++
++void online_page(struct page *page)
++{
++      ClearPageReserved(page);
++      free_cold_page(page);
++      totalram_pages++;
++      num_physpages++;
++}
++
++/*
++ * This works only for the non-NUMA case.  Later, we'll need a lookup
++ * to convert from real physical addresses to nid, that doesn't use
++ * pfn_to_nid().
++ */
++int __devinit add_memory(u64 start, u64 size)
++{
++      struct pglist_data *pgdata = NODE_DATA(0);
++      struct zone *zone;
++      unsigned long start_pfn = start >> PAGE_SHIFT;
++      unsigned long nr_pages = size >> PAGE_SHIFT;
++
++      /* this should work for most non-highmem platforms */
++      zone = pgdata->node_zones;
++
++      return __add_pages(zone, start_pfn, nr_pages);
++}
++
++/*
++ * First pass at this code will check to determine if the remove
++ * request is within the RMO.  Do not allow removal within the RMO.
++ */
++int __devinit remove_memory(u64 start, u64 size)
++{
++      struct zone *zone;
++      unsigned long start_pfn, end_pfn, nr_pages;
++
++      start_pfn = start >> PAGE_SHIFT;
++      nr_pages = size >> PAGE_SHIFT;
++      end_pfn = start_pfn + nr_pages;
++
++      printk("%s(): Attempting to remove memory in range "
++                      "%lx to %lx\n", __func__, start, start+size);
++      /*
++       * check for range within RMO
++       */
++      zone = page_zone(pfn_to_page(start_pfn));
++
++      printk("%s(): memory will be removed from "
++                      "the %s zone\n", __func__, zone->name);
++
++      /*
++       * not handling removing memory ranges that
++       * overlap multiple zones yet
++       */
++      if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
++              goto overlap;
++
++      /* make sure it is NOT in RMO */
++      if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
++              printk("%s(): range to be removed must NOT be in RMO!\n",
++                      __func__);
++              goto in_rmo;
++      }
++
++      return __remove_pages(zone, start_pfn, nr_pages);
++
++overlap:
++      printk("%s(): memory range to be removed overlaps "
++              "multiple zones!!!\n", __func__);
++in_rmo:
++      return -1;
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
 +void show_mem(void)
 +{
 +      unsigned long total = 0, reserved = 0;
 +      unsigned long shared = 0, cached = 0;
 +      unsigned long highmem = 0;
 +      struct page *page;
 +      pg_data_t *pgdat;
 +      unsigned long i;
 +
 +      printk("Mem-info:\n");
 +      show_free_areas();
 +      printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 +      for_each_pgdat(pgdat) {
++              unsigned long flags;
++              pgdat_resize_lock(pgdat, &flags);
 +              for (i = 0; i < pgdat->node_spanned_pages; i++) {
 +                      page = pgdat_page_nr(pgdat, i);
 +                      total++;
 +                      if (PageHighMem(page))
 +                              highmem++;
 +                      if (PageReserved(page))
 +                              reserved++;
 +                      else if (PageSwapCache(page))
 +                              cached++;
 +                      else if (page_count(page))
 +                              shared += page_count(page) - 1;
 +              }
++              pgdat_resize_unlock(pgdat, &flags);
 +      }
 +      printk("%ld pages of RAM\n", total);
 +#ifdef CONFIG_HIGHMEM
 +      printk("%ld pages of HIGHMEM\n", highmem);
 +#endif
 +      printk("%ld reserved pages\n", reserved);
 +      printk("%ld pages shared\n", shared);
 +      printk("%ld pages swap cached\n", cached);
 +}
 +
 +/*
 + * Initialize the bootmem system and give it all the memory we
 + * have available.  If we are using highmem, we only put the
 + * lowmem into the bootmem system.
 + */
 +#ifndef CONFIG_NEED_MULTIPLE_NODES
 +void __init do_init_bootmem(void)
 +{
 +      unsigned long i;
 +      unsigned long start, bootmap_pages;
 +      unsigned long total_pages;
 +      int boot_mapsize;
 +
 +      max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
 +#ifdef CONFIG_HIGHMEM
 +      total_pages = total_lowmem >> PAGE_SHIFT;
 +#endif
 +
 +      /*
 +       * Find an area to use for the bootmem bitmap.  Calculate the size of
 +       * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
 +       * Add 1 additional page in case the address isn't page-aligned.
 +       */
 +      bootmap_pages = bootmem_bootmap_pages(total_pages);
 +
 +      start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 +      BUG_ON(!start);
 +
 +      boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 +
 +      /* Add all physical memory to the bootmem map, mark each area
 +       * present.
 +       */
 +      for (i = 0; i < lmb.memory.cnt; i++) {
 +              unsigned long base = lmb.memory.region[i].base;
 +              unsigned long size = lmb_size_bytes(&lmb.memory, i);
 +#ifdef CONFIG_HIGHMEM
 +              if (base >= total_lowmem)
 +                      continue;
 +              if (base + size > total_lowmem)
 +                      size = total_lowmem - base;
 +#endif
 +              free_bootmem(base, size);
 +      }
 +
 +      /* reserve the sections we're already using */
 +      for (i = 0; i < lmb.reserved.cnt; i++)
 +              reserve_bootmem(lmb.reserved.region[i].base,
 +                              lmb_size_bytes(&lmb.reserved, i));
 +
 +      /* XXX need to clip this if using highmem? */
 +      for (i = 0; i < lmb.memory.cnt; i++)
 +              memory_present(0, lmb_start_pfn(&lmb.memory, i),
 +                             lmb_end_pfn(&lmb.memory, i));
 +      init_bootmem_done = 1;
 +}
 +
 +/*
 + * paging_init() sets up the page tables - in fact we've already done this.
 + */
 +void __init paging_init(void)
 +{
 +      unsigned long zones_size[MAX_NR_ZONES];
 +      unsigned long zholes_size[MAX_NR_ZONES];
 +      unsigned long total_ram = lmb_phys_mem_size();
 +      unsigned long top_of_ram = lmb_end_of_DRAM();
 +
 +#ifdef CONFIG_HIGHMEM
 +      map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
 +      pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
 +                      (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
 +      map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
 +      kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
 +                      (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
 +      kmap_prot = PAGE_KERNEL;
 +#endif /* CONFIG_HIGHMEM */
 +
 +      printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 +             top_of_ram, total_ram);
 +      printk(KERN_INFO "Memory hole size: %ldMB\n",
 +             (top_of_ram - total_ram) >> 20);
 +      /*
 +       * All pages are DMA-able so we put them all in the DMA zone.
 +       */
 +      memset(zones_size, 0, sizeof(zones_size));
 +      memset(zholes_size, 0, sizeof(zholes_size));
 +
 +      zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 +      zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +
 +#ifdef CONFIG_HIGHMEM
 +      zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
 +      zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
 +      zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +#else
 +      zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 +      zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +#endif /* CONFIG_HIGHMEM */
 +
 +      free_area_init_node(0, NODE_DATA(0), zones_size,
 +                          __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 +}
 +#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 +
 +void __init mem_init(void)
 +{
 +#ifdef CONFIG_NEED_MULTIPLE_NODES
 +      int nid;
 +#endif
 +      pg_data_t *pgdat;
 +      unsigned long i;
 +      struct page *page;
 +      unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 +
 +      num_physpages = max_pfn;        /* RAM is assumed contiguous */
 +      high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 +
 +#ifdef CONFIG_NEED_MULTIPLE_NODES
 +        for_each_online_node(nid) {
 +              if (NODE_DATA(nid)->node_spanned_pages != 0) {
 +                      printk("freeing bootmem node %x\n", nid);
 +                      totalram_pages +=
 +                              free_all_bootmem_node(NODE_DATA(nid));
 +              }
 +      }
 +#else
 +      max_mapnr = num_physpages;
 +      totalram_pages += free_all_bootmem();
 +#endif
 +      for_each_pgdat(pgdat) {
 +              for (i = 0; i < pgdat->node_spanned_pages; i++) {
 +                      page = pgdat_page_nr(pgdat, i);
 +                      if (PageReserved(page))
 +                              reservedpages++;
 +              }
 +      }
 +
 +      codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
 +      datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
 +      initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
 +      bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
 +
 +#ifdef CONFIG_HIGHMEM
 +      {
 +              unsigned long pfn, highmem_mapnr;
 +
 +              highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 +              for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 +                      struct page *page = pfn_to_page(pfn);
 +
 +                      ClearPageReserved(page);
 +                      set_page_count(page, 1);
 +                      __free_page(page);
 +                      totalhigh_pages++;
 +              }
 +              totalram_pages += totalhigh_pages;
 +              printk(KERN_INFO "High memory: %luk\n",
 +                     totalhigh_pages << (PAGE_SHIFT-10));
 +      }
 +#endif /* CONFIG_HIGHMEM */
 +
 +      printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
 +             "%luk reserved, %luk data, %luk bss, %luk init)\n",
 +              (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
 +              num_physpages << (PAGE_SHIFT-10),
 +              codesize >> 10,
 +              reservedpages << (PAGE_SHIFT-10),
 +              datasize >> 10,
 +              bsssize >> 10,
 +              initsize >> 10);
 +
 +      mem_init_done = 1;
 +
 +#ifdef CONFIG_PPC64
 +      /* Initialize the vDSO */
 +      vdso_init();
 +#endif
 +}
 +
 +/*
 + * This is called when a page has been modified by the kernel.
 + * It just marks the page as not i-cache clean.  We do the i-cache
 + * flush later when the page is given to a user process, if necessary.
 + */
 +void flush_dcache_page(struct page *page)
 +{
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &page->flags))
 +              clear_bit(PG_arch_1, &page->flags);
 +}
 +EXPORT_SYMBOL(flush_dcache_page);
 +
 +void flush_dcache_icache_page(struct page *page)
 +{
 +#ifdef CONFIG_BOOKE
 +      void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
 +      __flush_dcache_icache(start);
 +      kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
 +#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 +      /* On 8xx there is no need to kmap since highmem is not supported */
 +      __flush_dcache_icache(page_address(page)); 
 +#else
 +      __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 +#endif
 +
 +}
 +void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 +{
 +      clear_page(page);
 +
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +      /*
 +       * We shouldn't have to do this, but some versions of glibc
 +       * require it (ld.so assumes zero filled pages are icache clean)
 +       * - Anton
 +       */
 +
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &pg->flags))
 +              clear_bit(PG_arch_1, &pg->flags);
 +}
 +EXPORT_SYMBOL(clear_user_page);
 +
 +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 +                  struct page *pg)
 +{
 +      copy_page(vto, vfrom);
 +
 +      /*
 +       * We should be able to use the following optimisation, however
 +       * there are two problems.
 +       * Firstly a bug in some versions of binutils meant PLT sections
 +       * were not marked executable.
 +       * Secondly the first word in the GOT section is blrl, used
 +       * to establish the GOT address. Until recently the GOT was
 +       * not marked executable.
 +       * - Anton
 +       */
 +#if 0
 +      if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
 +              return;
 +#endif
 +
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &pg->flags))
 +              clear_bit(PG_arch_1, &pg->flags);
 +}
 +
 +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 +                           unsigned long addr, int len)
 +{
 +      unsigned long maddr;
 +
 +      maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
 +      flush_icache_range(maddr, maddr + len);
 +      kunmap(page);
 +}
 +EXPORT_SYMBOL(flush_icache_user_range);
 +
 +/*
 + * This is called at the end of handling a user page fault, when the
 + * fault has been handled by updating a PTE in the linux page tables.
 + * We use it to preload an HPTE into the hash table corresponding to
 + * the updated linux PTE.
 + * 
 + * This must always be called with the mm->page_table_lock held
 + */
 +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 +                    pte_t pte)
 +{
 +      /* handle i-cache coherency */
 +      unsigned long pfn = pte_pfn(pte);
 +#ifdef CONFIG_PPC32
 +      pmd_t *pmd;
 +#else
 +      unsigned long vsid;
 +      void *pgdir;
 +      pte_t *ptep;
 +      int local = 0;
 +      cpumask_t tmp;
 +      unsigned long flags;
 +#endif
 +
 +      /* handle i-cache coherency */
 +      if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
 +          !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 +          pfn_valid(pfn)) {
 +              struct page *page = pfn_to_page(pfn);
 +              if (!PageReserved(page)
 +                  && !test_bit(PG_arch_1, &page->flags)) {
 +                      if (vma->vm_mm == current->active_mm) {
 +#ifdef CONFIG_8xx
 +                      /* On 8xx, cache control instructions (particularly 
 +                       * "dcbst" from flush_dcache_icache) fault as write 
 +                       * operation if there is an unpopulated TLB entry 
 +                       * for the address in question. To workaround that, 
 +                       * we invalidate the TLB here, thus avoiding dcbst 
 +                       * misbehaviour.
 +                       */
 +                              _tlbie(address);
 +#endif
 +                              __flush_dcache_icache((void *) address);
 +                      } else
 +                              flush_dcache_icache_page(page);
 +                      set_bit(PG_arch_1, &page->flags);
 +              }
 +      }
 +
 +#ifdef CONFIG_PPC_STD_MMU
 +      /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 +      if (!pte_young(pte) || address >= TASK_SIZE)
 +              return;
 +#ifdef CONFIG_PPC32
 +      if (Hash == 0)
 +              return;
 +      pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
 +      if (!pmd_none(*pmd))
 +              add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
 +#else
 +      pgdir = vma->vm_mm->pgd;
 +      if (pgdir == NULL)
 +              return;
 +
 +      ptep = find_linux_pte(pgdir, address);
 +      if (!ptep)
 +              return;
 +
 +      vsid = get_vsid(vma->vm_mm->context.id, address);
 +
 +      local_irq_save(flags);
 +      tmp = cpumask_of_cpu(smp_processor_id());
 +      if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 +              local = 1;
 +
 +      __hash_page(address, 0, vsid, ptep, 0x300, local);
 +      local_irq_restore(flags);
 +#endif
 +#endif
 +}
diff --combined arch/powerpc/mm/pgtable_64.c
index 484d24f9208bcf839b129249ab64d640c94bcf7b,0000000000000000000000000000000000000000..b79a7820613558181d16a8d25dce4687e6a3c7e3
mode 100644,000000..100644
--- /dev/null
@@@ -1,349 -1,0 +1,347 @@@
-               spin_lock(&init_mm.page_table_lock);
 +/*
 + *  This file contains ioremap and related functions for 64-bit machines.
 + *
 + *  Derived from arch/ppc64/mm/init.c
 + *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 + *
 + *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 + *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 + *    Copyright (C) 1996 Paul Mackerras
 + *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 + *
 + *  Derived from "arch/i386/mm/init.c"
 + *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 + *
 + *  Dave Engebretsen <engebret@us.ibm.com>
 + *      Rework for PPC64 port.
 + *
 + *  This program is free software; you can redistribute it and/or
 + *  modify it under the terms of the GNU General Public License
 + *  as published by the Free Software Foundation; either version
 + *  2 of the License, or (at your option) any later version.
 + *
 + */
 +
 +#include <linux/config.h>
 +#include <linux/signal.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
 +#include <linux/errno.h>
 +#include <linux/string.h>
 +#include <linux/types.h>
 +#include <linux/mman.h>
 +#include <linux/mm.h>
 +#include <linux/swap.h>
 +#include <linux/stddef.h>
 +#include <linux/vmalloc.h>
 +#include <linux/init.h>
 +#include <linux/delay.h>
 +#include <linux/bootmem.h>
 +#include <linux/highmem.h>
 +#include <linux/idr.h>
 +#include <linux/nodemask.h>
 +#include <linux/module.h>
 +
 +#include <asm/pgalloc.h>
 +#include <asm/page.h>
 +#include <asm/prom.h>
 +#include <asm/lmb.h>
 +#include <asm/rtas.h>
 +#include <asm/io.h>
 +#include <asm/mmu_context.h>
 +#include <asm/pgtable.h>
 +#include <asm/mmu.h>
 +#include <asm/uaccess.h>
 +#include <asm/smp.h>
 +#include <asm/machdep.h>
 +#include <asm/tlb.h>
 +#include <asm/eeh.h>
 +#include <asm/processor.h>
 +#include <asm/mmzone.h>
 +#include <asm/cputable.h>
 +#include <asm/ppcdebug.h>
 +#include <asm/sections.h>
 +#include <asm/system.h>
 +#include <asm/iommu.h>
 +#include <asm/abs_addr.h>
 +#include <asm/vdso.h>
 +#include <asm/imalloc.h>
 +
 +unsigned long ioremap_bot = IMALLOC_BASE;
 +static unsigned long phbs_io_bot = PHBS_IO_BASE;
 +
 +#ifdef CONFIG_PPC_ISERIES
 +
 +void __iomem *ioremap(unsigned long addr, unsigned long size)
 +{
 +      return (void __iomem *)addr;
 +}
 +
 +extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
 +                     unsigned long flags)
 +{
 +      return (void __iomem *)addr;
 +}
 +
 +void iounmap(volatile void __iomem *addr)
 +{
 +      return;
 +}
 +
 +#else
 +
 +/*
 + * map_io_page currently only called by __ioremap
 + * map_io_page adds an entry to the ioremap page table
 + * and adds an entry to the HPT, possibly bolting it
 + */
 +static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 +{
 +      pgd_t *pgdp;
 +      pud_t *pudp;
 +      pmd_t *pmdp;
 +      pte_t *ptep;
 +      unsigned long vsid;
 +
 +      if (mem_init_done) {
-               ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
 +              pgdp = pgd_offset_k(ea);
 +              pudp = pud_alloc(&init_mm, pgdp, ea);
 +              if (!pudp)
 +                      return -ENOMEM;
 +              pmdp = pmd_alloc(&init_mm, pudp, ea);
 +              if (!pmdp)
 +                      return -ENOMEM;
-               spin_unlock(&init_mm.page_table_lock);
++              ptep = pte_alloc_kernel(pmdp, ea);
 +              if (!ptep)
 +                      return -ENOMEM;
 +              set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 +                                                        __pgprot(flags)));
 +      } else {
 +              unsigned long va, vpn, hash, hpteg;
 +
 +              /*
 +               * If the mm subsystem is not fully up, we cannot create a
 +               * linux page table entry for this mapping.  Simply bolt an
 +               * entry in the hardware page table.
 +               */
 +              vsid = get_kernel_vsid(ea);
 +              va = (vsid << 28) | (ea & 0xFFFFFFF);
 +              vpn = va >> PAGE_SHIFT;
 +
 +              hash = hpt_hash(vpn, 0);
 +
 +              hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 +
 +              /* Panic if a pte group is full */
 +              if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
 +                                     HPTE_V_BOLTED,
 +                                     _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
 +                  == -1) {
 +                      panic("map_io_page: could not insert mapping");
 +              }
 +      }
 +      return 0;
 +}
 +
 +
 +static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 +                          unsigned long ea, unsigned long size,
 +                          unsigned long flags)
 +{
 +      unsigned long i;
 +
 +      if ((flags & _PAGE_PRESENT) == 0)
 +              flags |= pgprot_val(PAGE_KERNEL);
 +
 +      for (i = 0; i < size; i += PAGE_SIZE)
 +              if (map_io_page(ea+i, pa+i, flags))
 +                      return NULL;
 +
 +      return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 +}
 +
 +
 +void __iomem *
 +ioremap(unsigned long addr, unsigned long size)
 +{
 +      return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 +}
 +
 +void __iomem * __ioremap(unsigned long addr, unsigned long size,
 +                       unsigned long flags)
 +{
 +      unsigned long pa, ea;
 +      void __iomem *ret;
 +
 +      /*
 +       * Choose an address to map it to.
 +       * Once the imalloc system is running, we use it.
 +       * Before that, we map using addresses going
 +       * up from ioremap_bot.  imalloc will use
 +       * the addresses from ioremap_bot through
 +       * IMALLOC_END
 +       * 
 +       */
 +      pa = addr & PAGE_MASK;
 +      size = PAGE_ALIGN(addr + size) - pa;
 +
 +      if (size == 0)
 +              return NULL;
 +
 +      if (mem_init_done) {
 +              struct vm_struct *area;
 +              area = im_get_free_area(size);
 +              if (area == NULL)
 +                      return NULL;
 +              ea = (unsigned long)(area->addr);
 +              ret = __ioremap_com(addr, pa, ea, size, flags);
 +              if (!ret)
 +                      im_free(area->addr);
 +      } else {
 +              ea = ioremap_bot;
 +              ret = __ioremap_com(addr, pa, ea, size, flags);
 +              if (ret)
 +                      ioremap_bot += size;
 +      }
 +      return ret;
 +}
 +
 +#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 +
 +int __ioremap_explicit(unsigned long pa, unsigned long ea,
 +                     unsigned long size, unsigned long flags)
 +{
 +      struct vm_struct *area;
 +      void __iomem *ret;
 +      
 +      /* For now, require page-aligned values for pa, ea, and size */
 +      if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
 +          !IS_PAGE_ALIGNED(size)) {
 +              printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
 +              return 1;
 +      }
 +      
 +      if (!mem_init_done) {
 +              /* Two things to consider in this case:
 +               * 1) No records will be kept (imalloc, etc) that the region
 +               *    has been remapped
 +               * 2) It won't be easy to iounmap() the region later (because
 +               *    of 1)
 +               */
 +              ;
 +      } else {
 +              area = im_get_area(ea, size,
 +                      IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
 +              if (area == NULL) {
 +                      /* Expected when PHB-dlpar is in play */
 +                      return 1;
 +              }
 +              if (ea != (unsigned long) area->addr) {
 +                      printk(KERN_ERR "unexpected addr return from "
 +                             "im_get_area\n");
 +                      return 1;
 +              }
 +      }
 +      
 +      ret = __ioremap_com(pa, pa, ea, size, flags);
 +      if (ret == NULL) {
 +              printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
 +              return 1;
 +      }
 +      if (ret != (void *) ea) {
 +              printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 +              return 1;
 +      }
 +
 +      return 0;
 +}
 +
 +/*  
 + * Unmap an IO region and remove it from imalloc'd list.
 + * Access to IO memory should be serialized by driver.
 + * This code is modeled after vmalloc code - unmap_vm_area()
 + *
 + * XXX        what about calls before mem_init_done (ie python_countermeasures())
 + */
 +void iounmap(volatile void __iomem *token)
 +{
 +      void *addr;
 +
 +      if (!mem_init_done)
 +              return;
 +      
 +      addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 +
 +      im_free(addr);
 +}
 +
 +static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 +{
 +      struct vm_struct *area;
 +
 +      /* Check whether subsets of this region exist */
 +      area = im_get_area(addr, size, IM_REGION_SUPERSET);
 +      if (area == NULL)
 +              return 1;
 +
 +      while (area) {
 +              iounmap((void __iomem *) area->addr);
 +              area = im_get_area(addr, size,
 +                              IM_REGION_SUPERSET);
 +      }
 +
 +      return 0;
 +}
 +
 +int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 +{
 +      struct vm_struct *area;
 +      unsigned long addr;
 +      int rc;
 +      
 +      addr = (unsigned long __force) start & PAGE_MASK;
 +
 +      /* Verify that the region either exists or is a subset of an existing
 +       * region.  In the latter case, split the parent region to create 
 +       * the exact region 
 +       */
 +      area = im_get_area(addr, size, 
 +                          IM_REGION_EXISTS | IM_REGION_SUBSET);
 +      if (area == NULL) {
 +              /* Determine whether subset regions exist.  If so, unmap */
 +              rc = iounmap_subset_regions(addr, size);
 +              if (rc) {
 +                      printk(KERN_ERR
 +                             "%s() cannot unmap nonexistent range 0x%lx\n",
 +                              __FUNCTION__, addr);
 +                      return 1;
 +              }
 +      } else {
 +              iounmap((void __iomem *) area->addr);
 +      }
 +      /*
 +       * FIXME! This can't be right:
 +      iounmap(area->addr);
 +       * Maybe it should be "iounmap(area);"
 +       */
 +      return 0;
 +}
 +
 +#endif
 +
 +EXPORT_SYMBOL(ioremap);
 +EXPORT_SYMBOL(__ioremap);
 +EXPORT_SYMBOL(iounmap);
 +
 +void __iomem * reserve_phb_iospace(unsigned long size)
 +{
 +      void __iomem *virt_addr;
 +              
 +      if (phbs_io_bot >= IMALLOC_BASE) 
 +              panic("reserve_phb_iospace(): phb io space overflow\n");
 +                      
 +      virt_addr = (void __iomem *) phbs_io_bot;
 +      phbs_io_bot += size;
 +
 +      return virt_addr;
 +}
diff --combined arch/ppc/kernel/time.c
index 76f44ce4772ec05ea8b3dd4818983204a4b00a56,67797184f4eb3f99e9ae35147f8d6e438233997d..53ea723af60aa5fff91a1ff057986584098f7285
  
  #include <asm/time.h>
  
- /* XXX false sharing with below? */
- u64 jiffies_64 = INITIAL_JIFFIES;
- EXPORT_SYMBOL(jiffies_64);
  unsigned long disarm_decr[NR_CPUS];
  
  extern struct timezone sys_tz;
@@@ -121,15 -116,6 +116,15 @@@ unsigned long profile_pc(struct pt_reg
  EXPORT_SYMBOL(profile_pc);
  #endif
  
 +void wakeup_decrementer(void)
 +{
 +      set_dec(tb_ticks_per_jiffy);
 +      /* No currently-supported powerbook has a 601,
 +       * so use get_tbl, not native
 +       */
 +      last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
 +}
 +
  /*
   * timer_interrupt - gets called when the decrementer overflows,
   * with interrupts disabled.
diff --combined arch/ppc/platforms/hdpu.c
index 2cc12b04584ad0dc5de7c9594fa88ca789be8f0b,eed4ff6903f1e4c3a2da0a8374b27755973aa76c..b6a66d5e9d8357177d1701e33b9563ccbc646540
@@@ -609,11 -609,6 +609,6 @@@ static void parse_bootinfo(unsigned lon
  }
  
  #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
- static int hdpu_ide_check_region(ide_ioreg_t from, unsigned int extent)
- {
-       return check_region(from, extent);
- }
  static void
  hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
  {
@@@ -753,7 -748,7 +748,7 @@@ static int smp_hdpu_probe(void
  }
  
  static void
 -smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait)
 +smp_hdpu_message_pass(int target, int msg)
  {
        if (msg > 0x3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@@ -949,7 -944,7 +944,7 @@@ platform_init(unsigned long r3, unsigne
  #endif                                /* CONFIG_SERIAL_TEXT_DEBUG */
  
  #ifdef CONFIG_SMP
 -      ppc_md.smp_ops = &hdpu_smp_ops;
 +      smp_ops = &hdpu_smp_ops;
  #endif                                /* CONFIG_SMP */
  
  #if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
diff --combined drivers/char/mem.c
index 9df928d4f68d42765fd35fe5e7764b2cd5a3e301,38be4b0dbd1cc03157d88e5db78ec727cee246de..91dd669273e0018aeeb95b02e18b4549e074a661
@@@ -231,7 -231,9 +231,7 @@@ static ssize_t write_mem(struct file * 
  static int mmap_mem(struct file * file, struct vm_area_struct * vma)
  {
  #if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
 -      unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 -
 -      vma->vm_page_prot = phys_mem_access_prot(file, offset,
 +      vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
  #elif defined(pgprot_noncached)
@@@ -918,7 -920,8 +918,8 @@@ static int __init chr_dev_init(void
  
        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
-               class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
+               class_device_create(mem_class, NULL,
+                                       MKDEV(MEM_MAJOR, devlist[i].minor),
                                        NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                                S_IFCHR | devlist[i].mode, devlist[i].name);
diff --combined drivers/char/viotape.c
index d92a0564500793c110a5df68441480d9a51ae467,a5e104f428f8449602ffea248a3168d486212ca4..51abd3defc1c32d00e0217896f6ed64eccd74701
@@@ -956,9 -956,9 +956,9 @@@ static int viotape_probe(struct vio_de
        state[i].cur_part = 0;
        for (j = 0; j < MAX_PARTITIONS; ++j)
                state[i].part_stat_rwi[j] = VIOT_IDLE;
-       class_device_create(tape_class, MKDEV(VIOTAPE_MAJOR, i), NULL,
+       class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
                        "iseries!vt%d", i);
-       class_device_create(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80),
+       class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80),
                        NULL, "iseries!nvt%d", i);
        devfs_mk_cdev(MKDEV(VIOTAPE_MAJOR, i), S_IFCHR | S_IRUSR | S_IWUSR,
                        "iseries/vt%d", i);
@@@ -993,16 -993,13 +993,16 @@@ static struct vio_device_id viotape_dev
        { "viotape", "" },
        { "", "" }
  };
 -
  MODULE_DEVICE_TABLE(vio, viotape_device_table);
 +
  static struct vio_driver viotape_driver = {
 -      .name = "viotape",
        .id_table = viotape_device_table,
        .probe = viotape_probe,
 -      .remove = viotape_remove
 +      .remove = viotape_remove,
 +      .driver = {
 +              .name = "viotape",
 +              .owner = THIS_MODULE,
 +      }
  };
  
  
diff --combined drivers/net/bmac.c
index 0ee28899fb8dd636061d6073c6288a9629c42165,73f2fcfc557f84297f68a698696fd5b7f92310e8..bbca8ae8018c3718e0e41dd723f61d1712489791
@@@ -1658,7 -1658,6 +1658,7 @@@ static struct of_device_id bmac_match[
        },
        {},
  };
 +MODULE_DEVICE_TABLE (of, bmac_match);
  
  static struct macio_driver bmac_driver = 
  {
@@@ -1690,10 -1689,8 +1690,8 @@@ static void __exit bmac_exit(void
  {
        macio_unregister_driver(&bmac_driver);
  
-       if (bmac_emergency_rxbuf != NULL) {
-               kfree(bmac_emergency_rxbuf);
-               bmac_emergency_rxbuf = NULL;
-       }
+       kfree(bmac_emergency_rxbuf);
+       bmac_emergency_rxbuf = NULL;
  }
  
  MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
diff --combined drivers/net/ibmveth.c
index cbe9368a4d56e2da68f23ab92c3790fb38ac3d0f,36da54ad2b7bd0c00e1d504bd94c9eabac67dcb2..e5246f227c98afef92c476a1bbe55cdef3885d1c
@@@ -96,7 -96,7 +96,7 @@@ static void ibmveth_proc_unregister_dri
  static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
  static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
  static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
- static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
  
  #ifdef CONFIG_PROC_FS
  #define IBMVETH_PROC_DIR "net/ibmveth"
@@@ -181,6 -181,7 +181,7 @@@ static int ibmveth_alloc_buffer_pool(st
        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;
+       pool->active = 0;
  
        return 0;
  }
@@@ -236,7 -237,7 +237,7 @@@ static void ibmveth_replenish_buffer_po
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
                    
                if(lpar_rc != H_Success) {
-                       pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+                       pool->free_map[free_index] = index;
                        pool->skbuff[index] = NULL;
                        pool->consumer_index--;
                        dma_unmap_single(&adapter->vdev->dev,
        atomic_add(buffers_added, &(pool->available));
  }
  
- /* check if replenishing is needed.  */
- static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
- {
-       return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
-               (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
-               (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
- }
- /* kick the replenish tasklet if we need replenishing and it isn't already running */
- static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
- {
-       if(ibmveth_is_replenishing_needed(adapter) &&
-          (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
-               schedule_work(&adapter->replenish_task);
-       }
- }
- /* replenish tasklet routine */
+ /* replenish routine */
  static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 
  {
+       int i;
        adapter->replenish_task_cycles++;
  
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+       for(i = 0; i < IbmVethNumBufferPools; i++)
+               if(adapter->rx_buff_pool[i].active)
+                       ibmveth_replenish_buffer_pool(adapter, 
+                                                    &adapter->rx_buff_pool[i]);
  
        adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-       atomic_inc(&adapter->not_replenishing);
-       ibmveth_schedule_replenishing(adapter);
  }
  
  /* empty and free a buffer pool - also used to do cleanup in error paths */
@@@ -293,10 -276,8 +276,8 @@@ static void ibmveth_free_buffer_pool(st
  {
        int i;
  
-       if(pool->free_map) {
-               kfree(pool->free_map);
-               pool->free_map  = NULL;
-       }
+       kfree(pool->free_map);
+       pool->free_map = NULL;
  
        if(pool->skbuff && pool->dma_addr) {
                for(i = 0; i < pool->size; ++i) {
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
+       pool->active = 0;
  }
  
  /* remove a buffer from a pool */
@@@ -379,6 -361,12 +361,12 @@@ static void ibmveth_rxq_recycle_buffer(
        ibmveth_assert(pool < IbmVethNumBufferPools);
        ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
  
+       if(!adapter->rx_buff_pool[pool].active) {
+               ibmveth_rxq_harvest_buffer(adapter);
+               ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+               return;
+       }
        desc.desc = 0;
        desc.fields.valid = 1;
        desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
@@@ -409,6 -397,8 +397,8 @@@ static inline void ibmveth_rxq_harvest_
  
  static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
  {
+       int i;
        if(adapter->buffer_list_addr != NULL) {
                if(!dma_mapping_error(adapter->buffer_list_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                adapter->rx_queue.queue_addr = NULL;
        }
  
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+       for(i = 0; i<IbmVethNumBufferPools; i++)
+               ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
  }
  
  static int ibmveth_open(struct net_device *netdev)
  {
        struct ibmveth_adapter *adapter = netdev->priv;
        u64 mac_address = 0;
-       int rxq_entries;
+       int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
+       int i;
  
        ibmveth_debug_printk("open starting\n");
  
-       rxq_entries =
-               adapter->rx_buff_pool[0].size +
-               adapter->rx_buff_pool[1].size +
-               adapter->rx_buff_pool[2].size + 1;
+       for(i = 0; i<IbmVethNumBufferPools; i++)
+               rxq_entries += adapter->rx_buff_pool[i].size;
      
        adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
        adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;
  
-       if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
-          ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
-          ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
-       {
-               ibmveth_error_printk("unable to allocate buffer pools\n");
-               ibmveth_cleanup(adapter);
-               return -ENOMEM;
-       }
+       /* call change_mtu to init the buffer pools based in initial mtu */
+       ibmveth_change_mtu(netdev, netdev->mtu);
  
        memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
        mac_address = mac_address >> 16;
  
        if(lpar_rc != H_Success) {
                ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
 -              ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
 +              ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
                return rc;
        }
  
-       netif_start_queue(netdev);
+       ibmveth_debug_printk("initial replenish cycle\n");
+       ibmveth_replenish_task(adapter);
  
-       ibmveth_debug_printk("scheduling initial replenish cycle\n");
-       ibmveth_schedule_replenishing(adapter);
+       netif_start_queue(netdev);
  
        ibmveth_debug_printk("open complete\n");
  
@@@ -573,9 -555,6 +555,6 @@@ static int ibmveth_close(struct net_dev
  
        free_irq(netdev->irq, netdev);
  
-       cancel_delayed_work(&adapter->replenish_task);
-       flush_scheduled_work();
        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@@ -640,12 -619,18 +619,18 @@@ static int ibmveth_start_xmit(struct sk
        unsigned long lpar_rc;
        int nfrags = 0, curfrag;
        unsigned long correlator;
+       unsigned long flags;
        unsigned int retry_count;
+       unsigned int tx_dropped = 0;
+       unsigned int tx_bytes = 0;
+       unsigned int tx_packets = 0;
+       unsigned int tx_send_failed = 0;
+       unsigned int tx_map_failed = 0;
  
        if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
-               adapter->stats.tx_dropped++;
-               dev_kfree_skb(skb);
-               return 0;
+               tx_dropped++;
+               goto out;
        }
  
        memset(&desc, 0, sizeof(desc));
  
        if(dma_mapping_error(desc[0].fields.address)) {
                ibmveth_error_printk("tx: unable to map initial fragment\n");
-               adapter->tx_map_failed++;
-               adapter->stats.tx_dropped++;
-               dev_kfree_skb(skb);
-               return 0;
+               tx_map_failed++;
+               tx_dropped++;
+               goto out;
        }
  
        curfrag = nfrags;
  
                if(dma_mapping_error(desc[curfrag+1].fields.address)) {
                        ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
-                       adapter->tx_map_failed++;
-                       adapter->stats.tx_dropped++;
+                       tx_map_failed++;
+                       tx_dropped++;
                        /* Free all the mappings we just created */
                        while(curfrag < nfrags) {
                                dma_unmap_single(&adapter->vdev->dev,
                                                 DMA_TO_DEVICE);
                                curfrag++;
                        }
-                       dev_kfree_skb(skb);
-                       return 0;
+                       goto out;
                }
        }
  
                        ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
                                             desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
                }
-               adapter->tx_send_failed++;
-               adapter->stats.tx_dropped++;
+               tx_send_failed++;
+               tx_dropped++;
        } else {
-               adapter->stats.tx_packets++;
-               adapter->stats.tx_bytes += skb->len;
+               tx_packets++;
+               tx_bytes += skb->len;
+               netdev->trans_start = jiffies;
        }
  
        do {
                                desc[nfrags].fields.length, DMA_TO_DEVICE);
        } while(--nfrags >= 0);
  
+ out:  spin_lock_irqsave(&adapter->stats_lock, flags);
+       adapter->stats.tx_dropped += tx_dropped;
+       adapter->stats.tx_bytes += tx_bytes;
+       adapter->stats.tx_packets += tx_packets;
+       adapter->tx_send_failed += tx_send_failed;
+       adapter->tx_map_failed += tx_map_failed;
+       spin_unlock_irqrestore(&adapter->stats_lock, flags);
        dev_kfree_skb(skb);
        return 0;
  }
@@@ -776,13 -768,14 +768,14 @@@ static int ibmveth_poll(struct net_devi
                                adapter->stats.rx_packets++;
                                adapter->stats.rx_bytes += length;
                                frames_processed++;
+                               netdev->last_rx = jiffies;
                        }
                } else {
                        more_work = 0;
                }
        } while(more_work && (frames_processed < max_frames_to_process));
  
-       ibmveth_schedule_replenishing(adapter);
+       ibmveth_replenish_task(adapter);
  
        if(more_work) {
                /* more work to do - return that we are not done yet */
@@@ -883,17 -876,54 +876,54 @@@ static void ibmveth_set_multicast_list(
  
  static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
  {
-       if ((new_mtu < 68) || (new_mtu > (1<<20)))
+       struct ibmveth_adapter *adapter = dev->priv;
+       int i;
+       int prev_smaller = 1;
+       if ((new_mtu < 68) || 
+           (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
                return -EINVAL;
+       for(i = 0; i<IbmVethNumBufferPools; i++) {
+               int activate = 0;
+               if (new_mtu > (pool_size[i]  - IBMVETH_BUFF_OH)) { 
+                       activate = 1;
+                       prev_smaller= 1;
+               } else {
+                       if (prev_smaller)
+                               activate = 1;
+                       prev_smaller= 0;
+               }
+               if (activate && !adapter->rx_buff_pool[i].active) {
+                       struct ibmveth_buff_pool *pool = 
+                                               &adapter->rx_buff_pool[i];
+                       if(ibmveth_alloc_buffer_pool(pool)) {
+                               ibmveth_error_printk("unable to alloc pool\n");
+                               return -ENOMEM;
+                       }
+                       adapter->rx_buff_pool[i].active = 1;
+               } else if (!activate && adapter->rx_buff_pool[i].active) {
+                       adapter->rx_buff_pool[i].active = 0;
+                       h_free_logical_lan_buffer(adapter->vdev->unit_address,
+                                         (u64)pool_size[i]);
+               }
+       }
+       /* kick the interrupt handler so that the new buffer pools get
+          replenished or deallocated */
+       ibmveth_interrupt(dev->irq, dev, NULL);
        dev->mtu = new_mtu;
        return 0;       
  }
  
  static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
  {
-       int rc;
+       int rc, i;
        struct net_device *netdev;
-       struct ibmveth_adapter *adapter;
+       struct ibmveth_adapter *adapter = NULL;
  
        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;
        netdev->ethtool_ops           = &netdev_ethtool_ops;
        netdev->change_mtu         = ibmveth_change_mtu;
        SET_NETDEV_DEV(netdev, &dev->dev);
+       netdev->features |= NETIF_F_LLTX; 
+       spin_lock_init(&adapter->stats_lock);
  
        memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
  
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+       for(i = 0; i<IbmVethNumBufferPools; i++)
+               ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 
+                                        pool_count[i], pool_size[i]);
  
        ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
  
-       INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
        adapter->buffer_list_dma = DMA_ERROR_CODE;
        adapter->filter_list_dma = DMA_ERROR_CODE;
        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
  
-       atomic_set(&adapter->not_replenishing, 1);
        ibmveth_debug_printk("registering netdev...\n");
  
        rc = register_netdev(netdev);
@@@ -1146,16 -1174,14 +1174,16 @@@ static struct vio_device_id ibmveth_dev
        { "network", "IBM,l-lan"},
        { "", "" }
  };
 -
  MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
  
  static struct vio_driver ibmveth_driver = {
 -      .name        = (char *)ibmveth_driver_name,
 -      .id_table    = ibmveth_device_table,
 -      .probe       = ibmveth_probe,
 -      .remove      = ibmveth_remove
 +      .id_table       = ibmveth_device_table,
 +      .probe          = ibmveth_probe,
 +      .remove         = ibmveth_remove,
 +      .driver         = {
 +              .name   = ibmveth_driver_name,
 +              .owner  = THIS_MODULE,
 +      }
  };
  
  static int __init ibmveth_module_init(void)
diff --combined drivers/net/mace.c
index f2fc1f26cd4b56eba166874d2365cfd77183616f,09b1e7b364e5a94b3348cd7eb99d29fcf6b3d036..2a5add257b8f32fab854367946d0eccef51e1591
@@@ -1016,7 -1016,6 +1016,7 @@@ static struct of_device_id mace_match[
        },
        {},
  };
 +MODULE_DEVICE_TABLE (of, mace_match);
  
  static struct macio_driver mace_driver = 
  {
@@@ -1036,10 -1035,8 +1036,8 @@@ static void __exit mace_cleanup(void
  {
        macio_unregister_driver(&mace_driver);
  
-       if (dummy_buf) {
-               kfree(dummy_buf);
-               dummy_buf = NULL;
-       }
+       kfree(dummy_buf);
+       dummy_buf = NULL;
  }
  
  MODULE_AUTHOR("Paul Mackerras");
diff --combined drivers/pcmcia/Makefile
index cb4861d9cb8027f1e7109d6111991f619d9c0c19,da7a8f2dab24279db110e53ba6533b049fbeb515..fe37541abbfe81b1f9fcf05fbd20bf6b31113611
@@@ -25,7 -25,6 +25,7 @@@ obj-$(CONFIG_PD6729)                          += pd6729.
  obj-$(CONFIG_I82365)                          += i82365.o
  obj-$(CONFIG_I82092)                          += i82092.o
  obj-$(CONFIG_TCIC)                            += tcic.o
 +obj-$(CONFIG_PCMCIA_M8XX)                              += m8xx_pcmcia.o
  obj-$(CONFIG_HD64465_PCMCIA)                  += hd64465_ss.o
  obj-$(CONFIG_PCMCIA_SA1100)                   += sa11xx_core.o sa1100_cs.o
  obj-$(CONFIG_PCMCIA_SA1111)                   += sa11xx_core.o sa1111_cs.o
@@@ -43,9 -42,11 +43,11 @@@ pxa2xx_core-y                                       += soc_common.o pxa2x
  au1x00_ss-y                                   += au1000_generic.o
  au1x00_ss-$(CONFIG_MIPS_PB1000)                       += au1000_pb1x00.o
  au1x00_ss-$(CONFIG_MIPS_PB1100)                       += au1000_pb1x00.o
+ au1x00_ss-$(CONFIG_MIPS_PB1200)                       += au1000_db1x00.o
  au1x00_ss-$(CONFIG_MIPS_PB1500)                       += au1000_pb1x00.o
  au1x00_ss-$(CONFIG_MIPS_DB1000)                       += au1000_db1x00.o
  au1x00_ss-$(CONFIG_MIPS_DB1100)                       += au1000_db1x00.o
+ au1x00_ss-$(CONFIG_MIPS_DB1200)                 += au1000_db1x00.o
  au1x00_ss-$(CONFIG_MIPS_DB1500)                       += au1000_db1x00.o
  au1x00_ss-$(CONFIG_MIPS_DB1550)                       += au1000_db1x00.o
  au1x00_ss-$(CONFIG_MIPS_XXS1500)               += au1000_xxs1500.o
@@@ -58,6 -59,7 +60,7 @@@ sa1111_cs-$(CONFIG_SA1100_JORNADA720)         
  sa1100_cs-y                                   += sa1100_generic.o
  sa1100_cs-$(CONFIG_SA1100_ASSABET)            += sa1100_assabet.o
  sa1100_cs-$(CONFIG_SA1100_CERF)                       += sa1100_cerf.o
+ sa1100_cs-$(CONFIG_SA1100_COLLIE)              += pxa2xx_sharpsl.o
  sa1100_cs-$(CONFIG_SA1100_H3600)              += sa1100_h3600.o
  sa1100_cs-$(CONFIG_SA1100_SHANNON)            += sa1100_shannon.o
  sa1100_cs-$(CONFIG_SA1100_SIMPAD)             += sa1100_simpad.o
diff --combined drivers/video/fbmem.c
index ca02aa2bfcece83594d920ee2df7ff9d7c03fa28,9073be4221a81d1e186c53b76db515ba5dcb4578..e2667ddab3f11033fd440cf10ed57365741ae35e
@@@ -918,7 -918,7 +918,7 @@@ fb_mmap(struct file *file, struct vm_ar
        }
  #endif
  #elif defined(__powerpc__)
 -      vma->vm_page_prot = phys_mem_access_prot(file, off,
 +      vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
  #elif defined(__alpha__)
@@@ -1031,7 -1031,7 +1031,7 @@@ register_framebuffer(struct fb_info *fb
                        break;
        fb_info->node = i;
  
-       fb_info->class_device = class_device_create(fb_class, MKDEV(FB_MAJOR, i),
+       fb_info->class_device = class_device_create(fb_class, NULL, MKDEV(FB_MAJOR, i),
                                    fb_info->device, "fb%d", i);
        if (IS_ERR(fb_info->class_device)) {
                /* Not fatal */
diff --combined include/asm-powerpc/rwsem.h
index 0a5b83a3c9491a7e8f6bc59065d9577cb4539370,7a647fae3765bcc17fe127c915325d8b42f4f5bf..79bae4933b73a2e57cd34c9d6ba7a18229095027
@@@ -1,14 -1,18 +1,14 @@@
 +#ifndef _ASM_POWERPC_RWSEM_H
 +#define _ASM_POWERPC_RWSEM_H
 +
 +#ifdef __KERNEL__
 +
  /*
   * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
   * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
   * by Paul Mackerras <paulus@samba.org>.
 - *
 - * This program is free software; you can redistribute it and/or
 - * modify it under the terms of the GNU General Public License
 - * as published by the Free Software Foundation; either version
 - * 2 of the License, or (at your option) any later version.
   */
  
 -#ifndef _PPC64_RWSEM_H
 -#define _PPC64_RWSEM_H
 -
 -#ifdef __KERNEL__
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <asm/atomic.h>
@@@ -159,5 -163,10 +159,10 @@@ static inline int rwsem_atomic_update(i
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
  }
  
 -#endif /* __KERNEL__ */
 -#endif /* _PPC_RWSEM_XADD_H */
+ static inline int rwsem_is_locked(struct rw_semaphore *sem)
+ {
+       return (sem->count != 0);
+ }
 +#endif        /* __KERNEL__ */
 +#endif        /* _ASM_POWERPC_RWSEM_H */
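
The new rwsem_is_locked() helper above treats a zero count as "free" and any other value as "held". A toy userspace model of that convention, with invented names (toy_rwsem is not the kernel structure):

#include <assert.h>

struct toy_rwsem { long count; };        /* stand-in for struct rw_semaphore */

static int toy_is_locked(struct toy_rwsem *sem)
{
        return sem->count != 0;          /* mirrors the helper added above */
}

int main(void)
{
        struct toy_rwsem sem = { 0 };    /* count 0: nobody holds it */
        assert(!toy_is_locked(&sem));
        sem.count = 2;                   /* e.g. two readers hold it */
        assert(toy_is_locked(&sem));
        return 0;
}
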
diff --combined include/asm-powerpc/semaphore.h
index fd42fe97158fd2f91744d8c40999f821c15a11c2,d9ecb9969238fb1576efc62a76a7b744d410789d..57369d2cadef816fdb551b83b6d59b647f1763da
@@@ -1,5 -1,5 +1,5 @@@
 -#ifndef _PPC64_SEMAPHORE_H
 -#define _PPC64_SEMAPHORE_H
 +#ifndef _ASM_POWERPC_SEMAPHORE_H
 +#define _ASM_POWERPC_SEMAPHORE_H
  
  /*
   * Remove spinlock-based RW semaphores; RW semaphore definitions are
@@@ -31,9 -31,6 +31,6 @@@ struct semaphore 
        .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
  }
  
- #define __MUTEX_INITIALIZER(name) \
-       __SEMAPHORE_INITIALIZER(name, 1)
  #define __DECLARE_SEMAPHORE_GENERIC(name, count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
  
@@@ -95,4 -92,4 +92,4 @@@ static inline void up(struct semaphore 
  
  #endif /* __KERNEL__ */
  
 -#endif /* !(_PPC64_SEMAPHORE_H) */
 +#endif /* _ASM_POWERPC_SEMAPHORE_H */
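
The semaphore.h hunk above drops __MUTEX_INITIALIZER(), which was simply __SEMAPHORE_INITIALIZER(name, 1). A toy illustration of that equivalence, using invented names rather than the kernel structures:

#include <assert.h>

struct toy_sem { int count; };

#define TOY_SEM_INIT(n)  { .count = (n) }
#define TOY_MUTEX_INIT   TOY_SEM_INIT(1)     /* what the removed macro boiled down to */

int main(void)
{
        struct toy_sem mtx = TOY_MUTEX_INIT;
        assert(mtx.count == 1);              /* available to exactly one holder */
        return 0;
}
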
diff --combined include/asm-powerpc/unistd.h
index c2d039e338a87c30385ee616887e9a8e5ca66994,404c143e643d910aee6711412c56e0d48ecb4658..0991dfceef1df98979a45e5431107190752227a7
@@@ -3,13 -3,7 +3,13 @@@
  
  /*
   * This file contains the system call numbers.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version
 + * 2 of the License, or (at your option) any later version.
   */
 +
  #define __NR_restart_syscall    0
  #define __NR_exit               1
  #define __NR_fork               2
  #define __NR_vfork            189
  #define __NR_ugetrlimit               190     /* SuS compliant getrlimit */
  #define __NR_readahead                191
 +#ifndef __powerpc64__                 /* these are 32-bit only */
  #define __NR_mmap2            192
  #define __NR_truncate64               193
  #define __NR_ftruncate64      194
  #define __NR_stat64           195
  #define __NR_lstat64          196
  #define __NR_fstat64          197
 +#endif
  #define __NR_pciconfig_read   198
  #define __NR_pciconfig_write  199
  #define __NR_pciconfig_iobase 200
  #define __NR_multiplexer      201
  #define __NR_getdents64               202
  #define __NR_pivot_root               203
 +#ifndef __powerpc64__
  #define __NR_fcntl64          204
 +#endif
  #define __NR_madvise          205
  #define __NR_mincore          206
  #define __NR_gettid           207
  #define __NR_sched_getaffinity        223
  /* 224 currently unused */
  #define __NR_tuxcall          225
 +#ifndef __powerpc64__
  #define __NR_sendfile64               226
 +#endif
  #define __NR_io_setup         227
  #define __NR_io_destroy               228
  #define __NR_io_getevents     229
  #define __NR_utimes           251
  #define __NR_statfs64         252
  #define __NR_fstatfs64                253
 +#ifndef __powerpc64__
  #define __NR_fadvise64_64     254
 +#endif
  #define __NR_rtas             255
  #define __NR_sys_debug_setcontext 256
  /* Number 257 is reserved for vserver */
  /* 258 currently unused */
 -/* Number 259 is reserved for new sys_mbind */
 -/* Number 260 is reserved for new sys_get_mempolicy */
 -/* Number 261 is reserved for new sys_set_mempolicy */
 +#define __NR_mbind            259
 +#define __NR_get_mempolicy    260
 +#define __NR_set_mempolicy    261
  #define __NR_mq_open          262
  #define __NR_mq_unlink                263
  #define __NR_mq_timedsend     264
  
  #define __NR_syscalls         278
  
 -#define __NR(n)       #n
 +#ifdef __KERNEL__
 +#define __NR__exit __NR_exit
 +#define NR_syscalls   __NR_syscalls
 +#endif
 +
 +#ifndef __ASSEMBLY__
  
  /* On powerpc a system call basically clobbers the same registers like a
   * function call, with the exception of LR (which is needed for the
@@@ -408,6 -389,7 +408,6 @@@ type name(type1 arg1, type2 arg2, type
  {                                                                     \
        __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);      \
  }
 -
  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
  type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
  {                                                                     \
  
  #ifdef __KERNEL__
  
 -#define __NR__exit __NR_exit
 -#define NR_syscalls   __NR_syscalls
 +#include <linux/config.h>
 +#include <linux/types.h>
 +#include <linux/compiler.h>
 +#include <linux/linkage.h>
  
  #define __ARCH_WANT_IPC_PARSE_VERSION
  #define __ARCH_WANT_OLD_READDIR
 -#define __ARCH_WANT_OLD_STAT
  #define __ARCH_WANT_STAT64
  #define __ARCH_WANT_SYS_ALARM
  #define __ARCH_WANT_SYS_GETHOSTNAME
  #define __ARCH_WANT_SYS_SIGPENDING
  #define __ARCH_WANT_SYS_SIGPROCMASK
  #define __ARCH_WANT_SYS_RT_SIGACTION
 -
 -/*
 - * Forking from kernel space will result in the child getting a new,
 - * empty kernel stack area.  Thus the child cannot access automatic
 - * variables set in the parent unless they are in registers, and the
 - * procedure where the fork was done cannot return to its caller in
 - * the child.
 - */
 -
 -#ifdef __KERNEL_SYSCALLS__
 -
 -#include <linux/compiler.h>
 -#include <linux/types.h>
 +#ifdef CONFIG_PPC32
 +#define __ARCH_WANT_OLD_STAT
 +#endif
 +#ifdef CONFIG_PPC64
 +#define __ARCH_WANT_COMPAT_SYS_TIME
 +#endif
  
  /*
   * System call prototypes.
   */
 +#ifdef __KERNEL_SYSCALLS__
  extern pid_t setsid(void);
  extern int write(int fd, const char *buf, off_t count);
  extern int read(int fd, char *buf, off_t count);
@@@ -462,13 -449,10 +462,13 @@@ extern int execve(const char *file, cha
  extern int open(const char *file, int flag, int mode);
  extern int close(int fd);
  extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
 +#endif /* __KERNEL_SYSCALLS__ */
  
 -unsigned long sys_mmap(unsigned long addr, size_t len,
 -                      unsigned long prot, unsigned long flags,
 -                      unsigned long fd, off_t offset);
 +/*
 + * Functions that implement syscalls.
 + */
 +unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
 +                     unsigned long flags, unsigned long fd, off_t offset);
  unsigned long sys_mmap2(unsigned long addr, size_t len,
                        unsigned long prot, unsigned long flags,
                        unsigned long fd, unsigned long pgoff);
@@@ -477,19 -461,21 +477,18 @@@ int sys_execve(unsigned long a0, unsign
                unsigned long a3, unsigned long a4, unsigned long a5,
                struct pt_regs *regs);
  int sys_clone(unsigned long clone_flags, unsigned long usp,
 -            int __user *parent_tidp, void __user *child_threadptr,
 -            int __user *child_tidp, int p6,
 -            struct pt_regs *regs);
 -int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
 +              int __user *parent_tidp, void __user *child_threadptr,
 +              int __user *child_tidp, int p6, struct pt_regs *regs);
 +int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
 +              unsigned long p4, unsigned long p5, unsigned long p6,
                struct pt_regs *regs);
 -int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
 +int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
 +              unsigned long p4, unsigned long p5, unsigned long p6,
                struct pt_regs *regs);
  int sys_pipe(int __user *fildes);
- int sys_ptrace(long request, long pid, long addr, long data);
  struct sigaction;
 -long sys_rt_sigaction(int sig,
 -                    const struct sigaction __user *act,
 -                    struct sigaction __user *oact,
 -                    size_t sigsetsize);
 -
 -#endif /* __KERNEL_SYSCALLS__ */
 +long sys_rt_sigaction(int sig, const struct sigaction __user *act,
 +                    struct sigaction __user *oact, size_t sigsetsize);
  
  /*
   * "Conditional" syscalls
   * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
   * but it doesn't work on all toolchains, so we just do it by hand
   */
 -#ifndef cond_syscall
 +#ifdef CONFIG_PPC32
  #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
 +#else
 +#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
  #endif
  
 -#endif /* __KERNEL__ */
 +#endif                /* __KERNEL__ */
 +
 +#endif                /* __ASSEMBLY__ */
  
  #endif /* _ASM_PPC_UNISTD_H_ */
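
The cond_syscall() hunk above keeps the hand-rolled .weak/.set asm because __attribute__((weak,alias(...))) was not dependable on every toolchain, and it now splits on CONFIG_PPC32: the 64-bit variant prefixes the names with a dot, following the ppc64 convention in which the dotted symbol is the actual function entry point. A hedged, gcc-specific userspace sketch of the same weak-alias idea, with invented names:

#include <stdio.h>

long sys_ni_syscall_demo(void)              /* stand-in for sys_ni_syscall */
{
        return -38;                         /* -ENOSYS */
}

/* With no strong definition elsewhere, calls fall back to the stub above. */
long sys_optional_demo(void)
        __attribute__((weak, alias("sys_ni_syscall_demo")));

int main(void)
{
        printf("optional syscall returned %ld\n", sys_optional_demo());
        return 0;
}
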
diff --combined include/asm-ppc64/pgtable.h
index 8cf5991540e36630f946f555df66c82cd012228f,2eb1778a3a15542ee8cc9840f09837446d01a1c4..8c3f574046b6cbb6310bb72cc4eaa20a464e69c0
@@@ -471,17 -471,19 +471,19 @@@ static inline void __ptep_set_access_fl
  #define pgprot_noncached(prot)        (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
  
  struct file;
 -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
  #define __HAVE_PHYS_MEM_ACCESS_PROT
  
  #define __HAVE_ARCH_PTE_SAME
  #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
  
+ #define pte_ERROR(e) \
+       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
  #define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
  #define pud_ERROR(e) \
-       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e))
+       printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
  #define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))