/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK		0x00ffffff
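/* A scatter-gather window pte built by mk_iommu_pte() below is the page
   frame number shifted up one bit with bit 0 as the valid bit, i.e.
   ((paddr >> PAGE_SHIFT) << 1) | 1 for a page-aligned address; a zero
   entry means the window page is unmapped and free.  */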
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
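	/* One unsigned long pte per window page, so mem_size above is the
	   size in bytes of the pte table: window_size / PAGE_SIZE entries.  */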
	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */
	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}
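/* is_span_boundary() is true when a run of NR window pages starting at pte
   index INDEX (SHIFT is the arena base expressed in pages) would cross a
   BOUNDARY_SIZE-aligned boundary in bus address space.  */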
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	BUG_ON(arena->dma_base & ~PAGE_MASK);
	base = arena->dma_base >> PAGE_SHIFT;
	if (dev)
		boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
			>> PAGE_SHIFT;
	else
		boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;

	BUG_ON(!is_power_of_2(boundary_size));
	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
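/* Note that next_entry is only a rotating allocation hint: the code above
   advances it past each new block, and the unmap paths below flush the
   IOMMU TLB whenever they free ptes at or above it, since those could
   otherwise be reused without an intervening flush.  */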
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window. */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
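/* Illustrative driver-side usage (a sketch only, not code from this file;
   "pdev", "buf" and "len" are placeholders):

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		goto map_failed;
	... program the device with "bus" and run the transfer ...
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
*/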
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
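/* Illustrative usage (a sketch only, not code from this file; "pdev" and
   "ring_size" are placeholders):

	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(pdev, ring_size, &ring_dma);

	if (!ring)
		goto no_mem;
	... hand "ring_dma" to the device, access "ring" from the CPU ...
	pci_free_consistent(pdev, ring_size, ring, ring_dma);
*/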
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sgs without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
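/* Example: three entries whose pages are physically contiguous are marked
   0, -1, -1 and the leader's dma_length becomes the sum of the three
   lengths.  If the second entry were only virtually adjacent (both
   boundaries page aligned) and virt_ok were set, it would be marked -2
   and the leader's dma_address would end up as 1.  */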
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;
#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);
	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
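/* Note that pci_map_sg() returns the number of coalesced entries actually
   mapped (out - start), which may be smaller than NENTS; callers should
   walk only that many entries, but must still pass the original NENTS to
   pci_unmap_sg().  */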
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */
void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
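/* Note on pci_unmap_sg() above: rather than flushing per entry, it tracks
   the lowest and highest freed bus addresses (fbeg/fend) and issues at
   most one mv_pci_tbi() invalidate covering the whole freed range.  */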
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
/*
 * AGP GART extensions to the IOMMU
 */
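/* These entry points are used by the AGP GART code: iommu_reserve() stakes
   out a range of window ptes (marking them IOMMU_RESERVED_PTE),
   iommu_bind() points reserved ptes at real pages, iommu_unbind() returns
   them to the reserved state, and iommu_release() frees the range.  */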
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for (i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
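/* alpha_gendev_to_pci() lets struct device based entry points such as
   dma_set_mask() below reuse the PCI implementation above; non-PCI
   (ISA/EISA) devices are routed through the ISA bridge's pci_dev, or
   treated as limited to the 16MB ISA DMA window when NULL is returned.  */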
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);