www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Pull hp-machvec into release branch
author Tony Luck <tony.luck@intel.com>
Fri, 28 Oct 2005 18:15:25 +0000 (11:15 -0700)
committer Tony Luck <tony.luck@intel.com>
Fri, 28 Oct 2005 18:15:25 +0000 (11:15 -0700)
arch/ia64/lib/swiotlb.c

diff --combined arch/ia64/lib/swiotlb.c
index a604efc7f6c9db2f859243dae31a35760b6406da,875b0c16250cdea9e03b7a2ed7f334392af83d5e..48e5ff26eb1d828a4db9eb451d2af76663b77c3f
   */
  #define IO_TLB_SHIFT 11
  
+ #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+ /*
+  * Minimum IO TLB size to bother booting with.  Systems with mainly
+  * 64bit capable cards will only lightly use the swiotlb.  If we can't
+  * allocate a contiguous 1MB, we're probably in trouble anyway.
+  */
+ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
  int swiotlb_force;
  
  /*
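
A minimal sketch, not part of the patch, of what the two new constants above work out to, assuming ia64's default 16 KB pages (PAGE_SHIFT = 14); the standalone program below only reproduces the macro arithmetic:

/* Illustrative only: reproduces the slab constants added by the patch. */
#include <stdio.h>

#define PAGE_SHIFT       14     /* assumption: ia64 with 16 KB pages */
#define IO_TLB_SHIFT     11     /* 2 KB slabs, as in swiotlb.c */
#define SLABS_PER_PAGE   (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

int main(void)
{
	printf("slabs per page: %d\n", SLABS_PER_PAGE);   /* 8 */
	printf("minimum slabs:  %d\n", IO_TLB_MIN_SLABS); /* 512, i.e. 1 MB */
	return 0;
}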
@@@ -123,8 -132,8 +132,8 @@@ swiotlb_init_with_default_size (size_t 
        /*
         * Get IO TLB memory from the low pages
         */
 -      io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
 -                                             (1 << IO_TLB_SHIFT));
 +      io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
 +                                           (1 << IO_TLB_SHIFT), 0x100000000);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
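
For scale, another small sketch (also not part of the patch) of the request that alloc_bootmem_low_pages_limit() now has to satisfy below the 4 GB boundary (0x100000000) when swiotlb_init() asks for the 64 MB default; IO_TLB_SEGSIZE is the 128-slab segment size used elsewhere in swiotlb.c:

/* Illustrative only: size of the default boot-time SWIOTLB request. */
#include <stdio.h>

#define IO_TLB_SHIFT   11
#define IO_TLB_SEGSIZE 128      /* segment size used elsewhere in swiotlb.c */

int main(void)
{
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;      /* 32768 slabs */

	/* Round up to whole segments, mirroring ALIGN(nslabs, IO_TLB_SEGSIZE). */
	nslabs = (nslabs + IO_TLB_SEGSIZE - 1) & ~(IO_TLB_SEGSIZE - 1UL);
	printf("slabs: %lu, bytes below 4 GB: %lu\n",
	       nslabs, nslabs << IO_TLB_SHIFT);
	return 0;
}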
@@@ -154,6 -163,99 +163,99 @@@ swiotlb_init (void
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
  }
  
+ /*
+  * Systems with larger DMA zones (those that don't support ISA) can
+  * initialize the swiotlb later using the slab allocator if needed.
+  * This should be just like above, but with some error catching.
+  */
+ int
+ swiotlb_late_init_with_default_size (size_t default_size)
+ {
+       unsigned long i, req_nslabs = io_tlb_nslabs;
+       unsigned int order;
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+       io_tlb_nslabs = SLABS_PER_PAGE << order;
+       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+               io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                       order);
+               if (io_tlb_start)
+                       break;
+               order--;
+       }
+       if (!io_tlb_start)
+               goto cleanup1;
+       if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+               io_tlb_nslabs = SLABS_PER_PAGE << order;
+       }
+       io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+       memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                     get_order(io_tlb_nslabs * sizeof(int)));
+       if (!io_tlb_list)
+               goto cleanup2;
+       for (i = 0; i < io_tlb_nslabs; i++)
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       io_tlb_index = 0;
+       io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                                  get_order(io_tlb_nslabs * sizeof(char *)));
+       if (!io_tlb_orig_addr)
+               goto cleanup3;
+       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+       /*
+        * Get the overflow emergency buffer
+        */
+       io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                 get_order(io_tlb_overflow));
+       if (!io_tlb_overflow_buffer)
+               goto cleanup4;
+       printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+              "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+              virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+       return 0;
+ cleanup4:
+       free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+                                                             sizeof(char *)));
+       io_tlb_orig_addr = NULL;
+ cleanup3:
+       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                        sizeof(int)));
+       io_tlb_list = NULL;
+       io_tlb_end = NULL;
+ cleanup2:
+       free_pages((unsigned long)io_tlb_start, order);
+       io_tlb_start = NULL;
+ cleanup1:
+       io_tlb_nslabs = req_nslabs;
+       return -ENOMEM;
+ }
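
A hypothetical caller sketch, not from this commit: a platform whose IOMMU setup fails at runtime could fall back to the new late initializer once the slab allocator is up, along these lines (the function and message names are made up for illustration; only swiotlb_late_init_with_default_size() comes from this commit):

/* Hypothetical fallback path; everything except the late-init call is
 * illustrative. */
static int __init example_platform_dma_init(void)
{
	/* Same 64 MB default that swiotlb_init() uses at boot time. */
	if (swiotlb_late_init_with_default_size(64 * (1 << 20)) != 0) {
		printk(KERN_ERR "example platform: cannot allocate software IO TLB\n");
		return -ENOMEM;
	}
	return 0;
}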
  static inline int
  address_needs_mapping(struct device *hwdev, dma_addr_t addr)
  {