/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
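
/* Explanatory note (added commentary, not from the original sources):
 * iommu_read()/iommu_write() access IOMMU and streaming-cache control
 * registers by physical address, using the ASI_PHYS_BYPASS_EC_E address
 * space identifier so the accesses bypass the CPU caches.  The
 * STC_FLUSHFLAG_* macros above operate on a flush-status word in memory:
 * the driver zeroes it with STC_FLUSHFLAG_INIT(), asks the streaming
 * cache to set it when a flush completes, and then spins until
 * STC_FLUSHFLAG_SET() becomes true (see strbuf_flush() below).
 */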

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
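
/* Explanatory note (added commentary, not from the original sources):
 * a "consistent" IOPTE is valid, cacheable and carries the DMA context
 * number in the IOPTE_CONTEXT field starting at bit 47; a "streaming"
 * IOPTE additionally sets IOPTE_STBUF so the transfer goes through the
 * streaming buffer.  For example, IOPTE_STREAMING(5) yields
 * IOPTE_VALID | IOPTE_CACHE | IOPTE_STBUF with context 5 encoded in the
 * context field (the exact bit layout comes from asm/iommu.h).
 */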

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator.  */
static long arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
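
/* Explanatory note (added commentary, not from the original sources):
 * the allocator is a rotating first-fit search over a bitmap with one bit
 * per IO page.  The search starts at arena->hint; if it runs off the end
 * of the map, the IOMMU TLB is flushed once and the search restarts from
 * bit 0 (pass 1).  Only when that second pass also fails does the caller
 * see -1.  The hint always advances past the last allocation, which,
 * together with the flush on wrap-around, avoids reusing an entry whose
 * old translation may still be cached in the IOMMU TLB.
 */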

static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
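
/* Worked example (added commentary; assumes an 8-byte iopte_t, which is
 * what sparc64 uses): a 128KB TSB gives num_tsb_entries = 131072 / 8 =
 * 16384 IO-page translations, and the arena bitmap needs 16384 / 8 = 2048
 * bytes (already a multiple of 8, so the round-up leaves it unchanged).
 */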

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
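
/* Explanatory note (added commentary, not from the original sources):
 * arena entry N corresponds to IOPTE N in the page table and to the DMA
 * address page_table_map_base + (N << IO_PAGE_SHIFT).  free_npages() is
 * therefore called with (dma_addr - page_table_map_base), which it shifts
 * back down to recover the starting arena entry.
 */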

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
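
/* Explanatory note (added commentary, not from the original sources):
 * context 0 is never handed out as a real context: ctx_lowest_free starts
 * at 1, iommu_alloc_ctx() falls back to 0 when the bitmap is exhausted,
 * and iommu_free_ctx() ignores ctx 0.  A ctx value of 0 therefore means
 * "no context", which is also what mappings use on hardware without
 * context-flush support.
 */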

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
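
/* Explanatory note (added commentary, not from the original sources):
 * coherent allocations always use IOPTE_CONSISTENT(0) | IOPTE_WRITE: they
 * bypass the streaming buffer, use no DMA context, and are always
 * writable by the device, since the caller may use the buffer in either
 * direction for the lifetime of the mapping.
 */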

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
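
/* Worked example (added commentary; assumes the usual sparc64 8KB IO page,
 * i.e. IO_PAGE_SIZE == 0x2000): mapping sz = 10000 bytes starting at an
 * oaddr whose low bits are 0x2100 spans
 * IO_PAGE_ALIGN(0x2100 + 10000) - 0x2000 = 0x6000 - 0x2000 = 0x4000
 * bytes, so npages == 2, and the returned DMA address keeps the 0x100
 * in-page offset from oaddr.
 */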

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
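
/* Explanatory note (added commentary, not from the original sources):
 * strbuf_flush() first tries a context-match flush: it writes the DMA
 * context number to the flush register and re-issues the write for every
 * bit still set in the context-match register.  If that times out, or if
 * the hardware lacks context flushing, it falls back to flushing each IO
 * page individually.  Finally, unless the mapping was DMA_TO_DEVICE (so
 * the device can only have read from memory), it arms the flush-flag word
 * and spins for up to roughly 100ms waiting for the streaming cache to
 * signal that all dirty data has been written back.
 */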

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems,
			   unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
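
/* Explanatory note (added commentary, not from the original sources):
 * prepare_sg() (from iommu_common) has already merged the scatterlist
 * into "nused" DMA segments and stored per-segment lengths in dma_length;
 * fill_sg() then walks the original nelems entries again and emits one
 * IOPTE per IO page of each merged segment.  Entries that are physically
 * contiguous with the previous one and do not cross an IO page boundary
 * are folded into the same run rather than starting a new PTE.
 */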

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev,
					  (page_address(sglist->page) +
					   sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
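
/* Explanatory note (added commentary, not from the original sources):
 * the return value "used" is the number of merged DMA segments, i.e. how
 * many sglist entries ended up with a non-zero dma_length after
 * prepare_sg(), which may be smaller than nelems.  Callers must iterate
 * over that many entries, not over the original nelems.
 */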

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
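
/* Usage note (added commentary, not from the original sources): these are
 * the sun4u (hardware IOMMU) DMA operations; sun4v systems install a
 * different dma_ops table from their hypervisor-based PCI code.  Each
 * operation expects dev->archdata.iommu and dev->archdata.stc to have
 * been filled in by the bus controller probe code before any mapping
 * call is made.
 */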

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);