.flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
 
+       .map_pt_hook = (void *)native_nop,
+
        .alloc_pt = (void *)native_nop,
        .alloc_pd = (void *)native_nop,
        .alloc_pd_clone = (void *)native_nop,
 
 #define vmi_check_page_type(p,t) do { } while (0)
 #endif
 
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+       /*
+        * Internally, the VMI ROM must map virtual addresses to physical
+        * addresses for processing MMU updates.  By the time MMU updates
+        * are issued, this information is typically already lost.
+        * Fortunately, the VMI provides a cache of mapping slots for active
+        * page tables.
+        *
+        * We use slot zero for the linear mapping of physical memory, and
+        * in HIGHPTE kernels, slots 1 and 2 for KM_PTE0 and KM_PTE1.
+        *
+        *  args:                 SLOT                 VA    COUNT PFN
+        */
+       BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+       vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
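The slot arithmetic above relies on KM_PTE1 immediately following KM_PTE0 in asm-i386/kmap_types.h. A minimal sketch of the resulting numbering (illustration only, not part of the patch; the helper name is invented):

	/*
	 * Illustration: with KM_PTE1 == KM_PTE0 + 1, the hook picks slot 1
	 * for KM_PTE0 and slot 2 for KM_PTE1, leaving slot 0 reserved for
	 * the linear mapping of physical memory.
	 */
	static inline unsigned int vmi_pt_slot(int type)
	{
		return (type - KM_PTE0) + 1;
	}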
 static void vmi_allocate_pt(u32 pfn)
 {
        vmi_set_page_type(pfn, VMI_PAGE_L1);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
 }

        vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
        vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 
+       paravirt_ops.map_pt_hook = vmi_map_pt_hook;
        paravirt_ops.alloc_pt = vmi_allocate_pt;
        paravirt_ops.alloc_pd = vmi_allocate_pd;
        paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
 
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(u32 addr);
 
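+       /* Identify the pfn behind a kmap_atomic'd pte page, so VA-based MMU updates can be resolved */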
+       void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn);
+
        void (*alloc_pt)(u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
 #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
 
+#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+
 #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
 #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
 
 
  */
 #define pte_update(mm, addr, ptep)             do { } while (0)
 #define pte_update_defer(mm, addr, ptep)       do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn)    do { } while (0)
 #endif
 
 /*
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address)                           \
+({                                                             \
+       pte_t *__ptep;                                          \
+       unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT;           \
+       __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
+       paravirt_map_pt_hook(KM_PTE0,__ptep, pfn);              \
+       __ptep = __ptep + pte_index(address);                   \
+       __ptep;                                                 \
+})
+#define pte_offset_map_nested(dir, address)                    \
+({                                                             \
+       pte_t *__ptep;                                          \
+       unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT;           \
+       __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
+       paravirt_map_pt_hook(KM_PTE1,__ptep, pfn);              \
+       __ptep = __ptep + pte_index(address);                   \
+       __ptep;                                                 \
+})
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
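For context, a sketch of how these macros are typically used on a HIGHPTE kernel, showing where the new hook fires (illustration only, not from the patch; the function and variable names are invented and locking is elided):

	/*
	 * pte_offset_map() now kmap_atomic()s the pte page *and* calls
	 * paravirt_map_pt_hook(), so a backend such as VMI can associate
	 * the KM_PTE0 virtual address with the pte page's pfn before the
	 * pte is written through that virtual address.
	 */
	static void example_set_pte(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long addr, pte_t val)
	{
		pte_t *pte = pte_offset_map(pmd, addr);	/* kmap + map_pt_hook */
		set_pte_at(mm, addr, pte, val);		/* MMU update by VA */
		pte_unmap(pte);				/* kunmap_atomic(.., KM_PTE0) */
	}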