ARM:OMAP: Add initial TWL support for non-MPU MMU framework
[linux-2.6-omap-h63xx.git] arch/arm/plat-omap/mmu.c
index 3f572fd7f0b5d09537b1d5f29ea898c86fe080a1..30c646e01e1ea304a0d12faa0cfbe904052ee187 100644 (file)
@@ -3,10 +3,12 @@
  *
  * OMAP MMU management framework
  *
- * Copyright (C) 2002-2005 Nokia Corporation
+ * Copyright (C) 2002-2006 Nokia Corporation
  *
  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
- *        and Paul Mundt <paul.mundt@nokia.com>
+ *        and Paul Mundt <lethal@linux-sh.org>
+ *
+ * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define ORDER_64KB     (16 - PAGE_SHIFT)
 #define ORDER_4KB      (12 - PAGE_SHIFT)
 
+#define MMU_CNTL_EMUTLBUPDATE  (1<<3)
+#define MMU_CNTL_TWLENABLE     (1<<2)
+#define MMU_CNTL_MMUENABLE     (1<<1)
+
 static mempool_t *mempool_1M;
 static mempool_t *mempool_64K;
 
@@ -366,7 +372,141 @@ omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
 EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
 
 /*
- * MMU operations
+ * PTE operations
+ */
+static inline void
+omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
+                      unsigned long phys, int prot)
+{
+       pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
+       if (virt & (1 << SECTION_SHIFT))
+               pmdp++;
+       *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
+       flush_pmd_entry(pmdp);
+}
+
+static inline void
+omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
+                           unsigned long phys, int prot)
+{
+       int i;
+       for (i = 0; i < 16; i += 1) {
+               omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
+               virt += (PGDIR_SIZE / 2);
+       }
+}
+
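A note on the arithmetic above: classic two-level ARM Linux folds two 1MB hardware sections into each 2MB pgd entry, so the virt & (1 << SECTION_SHIFT) test in omap_mmu_alloc_section() selects the second section of a pair, and PGDIR_SIZE / 2 advances exactly one section. An ARM supersection descriptor must additionally be replicated across 16 consecutive first-level slots, which is what the loop does. A minimal compile-time sketch of those invariants (SZ_1M/SZ_16M come from asm/sizes.h; the check function itself is hypothetical, not part of the patch):

        static inline void twl_supersection_layout_checks(void)
        {
                /* one 1MB section per loop step */
                BUILD_BUG_ON(PGDIR_SIZE / 2 != SZ_1M);
                /* 16 replicated entries span the 16MB supersection */
                BUILD_BUG_ON(16 * (PGDIR_SIZE / 2) != SZ_16M);
        }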
+static inline int
+omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
+                   unsigned long phys, pgprot_t prot)
+{
+       pte_t *ptep;
+       pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
+
+       if (!(prot & PTE_TYPE_MASK))
+               prot |= PTE_TYPE_SMALL;
+
+       if (pmd_none(*pmdp)) {
+               ptep = pte_alloc_one_kernel(mm, virt);
+               if (ptep == NULL)
+                       return -ENOMEM;
+               pmd_populate_kernel(mm, pmdp, ptep);
+       }
+       ptep = pte_offset_kernel(pmdp, virt);
+       ptep -= PTRS_PER_PTE;
+       *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
+       flush_pmd_entry((pmd_t *)ptep);
+       return 0;
+}
+
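The ptep -= PTRS_PER_PTE step above is the subtle part of omap_mmu_alloc_page(): on ARM of this vintage each PTE page holds the two hardware page tables in its first half and the Linux shadow entries in its second half, and pte_offset_kernel() returns a pointer into the Linux half. Stepping back PTRS_PER_PTE entries therefore lands on the hardware descriptor that the external table walker actually fetches, and the flush_pmd_entry() call with its pmd_t cast is simply reused to clean that cache line. A sketch of the assumed layout:

        /*
         * Classic ARM PTE-page layout assumed here:
         *
         *   page + 0x000   h/w pt 0    <- walked by the DSP-side MMU
         *   page + 0x400   h/w pt 1
         *   page + 0x800   Linux pt 0  <- returned by pte_offset_kernel()
         *   page + 0xc00   Linux pt 1
         */
        pte_t *linux_ptep = pte_offset_kernel(pmdp, virt);
        pte_t *hw_ptep = linux_ptep - PTRS_PER_PTE;     /* 2048 bytes lower */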
+static inline int
+omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
+                        unsigned long phys, pgprot_t prot)
+{
+       int i, ret;
+       for (i = 0; i < 16; i += 1) {
+               ret = omap_mmu_alloc_page(mm, virt, phys,
+                                         prot | PTE_TYPE_LARGE);
+               if (ret)
+                       return -ENOMEM; /* can only fail on the 1st iteration */
+               virt += PAGE_SIZE;
+       }
+       return 0;
+}
+
+static int omap_mmu_load_pte(struct omap_mmu *mmu,
+                            struct omap_mmu_tlb_entry *e)
+{
+       int ret = 0;
+       struct mm_struct *mm = mmu->twl_mm;
+       const unsigned long va = e->va;
+       const unsigned long pa = e->pa;
+       const pgprot_t prot = mmu->ops->pte_get_attr(e);
+
+       spin_lock(&mm->page_table_lock);
+
+       switch (e->pgsz) {
+       case OMAP_MMU_CAM_PAGESIZE_16MB:
+               omap_mmu_alloc_supersection(mm, va, pa, prot);
+               break;
+       case OMAP_MMU_CAM_PAGESIZE_1MB:
+               omap_mmu_alloc_section(mm, va, pa, prot);
+               break;
+       case OMAP_MMU_CAM_PAGESIZE_64KB:
+               ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
+               break;
+       case OMAP_MMU_CAM_PAGESIZE_4KB:
+               ret = omap_mmu_alloc_page(mm, va, pa, prot);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       spin_unlock(&mm->page_table_lock);
+
+       return ret;
+}
+
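omap_mmu_load_pte() is the translation between the page sizes the OMAP MMU's CAM understands and ARM first/second-level descriptors; roughly, under standard ARM short-descriptor rules:

        OMAP_MMU_CAM_PAGESIZE_16MB -> one supersection entry, replicated in
                                      16 consecutive first-level slots
        OMAP_MMU_CAM_PAGESIZE_1MB  -> one section entry
        OMAP_MMU_CAM_PAGESIZE_64KB -> one large-page PTE, replicated in
                                      16 consecutive second-level slots
        OMAP_MMU_CAM_PAGESIZE_4KB  -> one small-page PTE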
+static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
+{
+       pte_t *ptep, *end;
+       pmd_t *pmdp;
+       struct mm_struct *mm = mmu->twl_mm;
+
+       spin_lock(&mm->page_table_lock);
+
+       pmdp = pmd_offset(pgd_offset(mm, virt), virt);
+
+       if (pmd_none(*pmdp))
+               goto out;
+
+       if (!pmd_table(*pmdp))
+               goto invalidate_pmd;
+
+       ptep = pte_offset_kernel(pmdp, virt);
+       pte_clear(mm, virt, ptep);
+       flush_pmd_entry((pmd_t *)ptep);
+
+       /* if no h/w PTEs remain in either table, free the whole page */
+       end = pmd_page_vaddr(*pmdp);
+       ptep = end - PTRS_PER_PTE;
+       while (ptep < end) {
+               if (!pte_none(*ptep))
+                       goto out;
+               ptep++;
+       }
+       pte_free_kernel(pmd_page_vaddr(*pmdp));
+
+ invalidate_pmd:
+       pmd_clear(pmdp);
+       flush_pmd_entry(pmdp);
+ out:
+       spin_unlock(&mm->page_table_lock);
+}
+
+/*
+ * TLB operations
  */
 static struct cam_ram_regset *
 omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
@@ -539,6 +679,33 @@ static void omap_mmu_gflush(struct omap_mmu *mmu)
        clk_disable(mmu->clk);
 }
 
+int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
+                           struct omap_mmu_tlb_entry *entry)
+{
+       int ret = -1;
+       if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
+               /* XXX: use PG_flag for prsvd */
+               ret = omap_mmu_load_pte(mmu, entry);
+               if (ret)
+                       return ret;
+       }
+       if (entry->tlb)
+               ret = omap_mmu_load_tlb_entry(mmu, entry);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
+
+int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
+{
+       int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
+       if (ret)
+               return ret;
+       if (mmu->ops->pte_get_attr)
+               omap_mmu_clear_pte(mmu, vadr);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);
+
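A minimal usage sketch of the exported pair, assuming a registered mmu whose ops implement pte_get_attr; dspadr and padr are hypothetical device-side and physical addresses, and INIT_TLB_ENTRY() is the initializer already used by the exmap code below:

        struct omap_mmu_tlb_entry ent;

        INIT_TLB_ENTRY(&ent, dspadr, padr, OMAP_MMU_CAM_PAGESIZE_4KB);
        if (omap_mmu_load_pte_entry(mmu, &ent) < 0)
                return -ENOMEM;         /* PTE and/or TLB load failed */

        /* ... device uses the mapping ... */

        omap_mmu_clear_pte_entry(mmu, dspadr);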
 /*
  * omap_mmu_exmap()
  *
@@ -680,9 +847,9 @@ found_free:
        if (status < 0)
                goto fail;
 
-       /* loading DSP TLB entry */
+       /* loading DSP PTE entry */
        INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
-       status = omap_mmu_load_tlb_entry(mmu, &tlb_ent);
+       status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
@@ -764,8 +931,8 @@ found_map:
                up_write(&mmu->exmap_sem);
                return -EINVAL;
        }
-       /* clearing DSP TLB entry */
-       omap_mmu_clear_tlb_entry(mmu, dspadr);
+       /* clearing DSP PTE entry */
+       omap_mmu_clear_pte_entry(mmu, dspadr);
 
        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
@@ -833,7 +1000,7 @@ void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
        INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
-       omap_mmu_load_tlb_entry(mmu, &tlb_ent);
+       omap_mmu_load_pte_entry(mmu, &tlb_ent);
 }
 EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
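Note the asymmetry for preserved mappings here: INIT_TLB_ENTRY_4KB_PRESERVED presumably marks the entry prsvd, so omap_mmu_load_pte_entry() skips the page-table side entirely and only locks the translation into the TLB; a pinned TLB entry can never miss, so no PTE backing is required.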
 
@@ -865,10 +1032,18 @@ EXPORT_SYMBOL_GPL(omap_mmu_disable);
 
 void omap_mmu_enable(struct omap_mmu *mmu, int reset)
 {
+       u32 val = MMU_CNTL_MMUENABLE;
+       u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);
+
        if (likely(reset))
                omap_mmu_reset(mmu);
 
-       omap_mmu_write_reg(mmu, 0x2, MMU_CNTL);
+       if (mmu->ops->pte_get_attr) {
+               omap_mmu_write_reg(mmu, pa, MMU_TTB);
+               val |= MMU_CNTL_TWLENABLE;
+       }
+
+       omap_mmu_write_reg(mmu, val, MMU_CNTL);
 }
 EXPORT_SYMBOL_GPL(omap_mmu_enable);
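Two details worth noting in the enable path: MMU_TTB has to be loaded with a physical address, since the external walker fetches descriptors straight over the system bus with no translation in front of the table, hence the virt_to_phys() on twl_mm->pgd; and the table walker is only switched on when the backend supplies pte_get_attr, i.e. when it can express its TLB attributes as ARM PTE bits. Without it the write degenerates to the old bare enable (the former magic 0x2, now spelled MMU_CNTL_MMUENABLE).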
 
@@ -1282,6 +1457,15 @@ int omap_mmu_register(struct omap_mmu *mmu)
        if (!mmu->exmap_tbl)
                return -ENOMEM;
 
+       if (mmu->ops->pte_get_attr) {
+               struct mm_struct *mm = mm_alloc();
+               if (!mm) {
+                       ret = -ENOMEM;
+                       goto err_mm_alloc;
+               }
+               mmu->twl_mm = mm;
+       }
+
        ret = device_register(&mmu->dev);
        if (unlikely(ret))
                goto err_dev_register;
@@ -1322,6 +1506,9 @@ err_dev_create_mmu:
 err_mmu_init:
        device_unregister(&mmu->dev);
 err_dev_register:
+       if (mmu->twl_mm)
+               __mmdrop(mmu->twl_mm);
+err_mm_alloc:
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
        return ret;
@@ -1343,6 +1530,13 @@ void omap_mmu_unregister(struct omap_mmu *mmu)
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
 
+       if (mmu->ops->pte_get_attr) {
+               if (mmu->twl_mm) {
+                       __mmdrop(mmu->twl_mm);
+                       mmu->twl_mm = NULL;
+               }
+       }
+
        device_unregister(&mmu->dev);
 }
 EXPORT_SYMBOL_GPL(omap_mmu_unregister);
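Lifecycle of twl_mm, for reference: mm_alloc() at registration also allocates, via mm_init(), the pgd that MMU_TTB later points at, so teardown has to go through __mmdrop(), which frees both the page-table pages and the mm_struct itself; a bare kfree() of the mm would leak the pgd. A sketch of the pairing, assuming nothing else ever takes a reference on this mm:

        struct mm_struct *mm = mm_alloc();      /* mm_init() allocates the pgd */
        if (!mm)
                return -ENOMEM;
        /* ... program MMU_TTB with virt_to_phys(mm->pgd), use the mapping ... */
        __mmdrop(mm);                           /* frees pgd and mm_struct */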