Merge branch 'for-2.6.28' of git://linux-nfs.org/~bfields/linux

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 90f01169c8f00d64dedca4207972a9e2261c579c..2a5e64881d9bf2476bfef280ca309ffe086e52e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -70,6 +70,9 @@ static int dbg = 0;
 module_param(dbg, bool, 0644);
 #endif
 
+static int oos_shadow = 1;
+module_param(oos_shadow, bool, 0644);
+
 #ifndef MMU_DEBUG
 #define ASSERT(x) do { } while (0)
 #else
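
oos_shadow is the runtime switch for the out-of-sync ("unsync") shadow page machinery introduced by the rest of this patch. It is declared as a writable bool module parameter (mode 0644), so on a typical build it should be adjustable at run time through the kvm module's parameter directory in sysfs; the exact path depends on how mmu.c is linked into the module.
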
@@ -147,6 +150,12 @@ struct kvm_shadow_walk {
                     u64 addr, u64 *spte, int level);
 };
 
+struct kvm_unsync_walk {
+       int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
+};
+
+typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
@@ -652,8 +661,6 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 
        if (write_protected)
                kvm_flush_remote_tlbs(kvm);
-
-       account_shadowed(kvm, gfn);
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
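
The account_shadowed() call dropped from rmap_write_protect() is not lost: it reappears below in kvm_mmu_get_page(), so a gfn is accounted once, when a non-metaphysical shadow page is created for it, rather than every time its rmap is write-protected.
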
@@ -862,6 +869,77 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
        BUG();
 }
 
+
+static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                           mmu_parent_walk_fn fn)
+{
+       struct kvm_pte_chain *pte_chain;
+       struct hlist_node *node;
+       struct kvm_mmu_page *parent_sp;
+       int i;
+
+       if (!sp->multimapped && sp->parent_pte) {
+               parent_sp = page_header(__pa(sp->parent_pte));
+               fn(vcpu, parent_sp);
+               mmu_parent_walk(vcpu, parent_sp, fn);
+               return;
+       }
+       hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
+               for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
+                       if (!pte_chain->parent_ptes[i])
+                               break;
+                       parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
+                       fn(vcpu, parent_sp);
+                       mmu_parent_walk(vcpu, parent_sp, fn);
+               }
+}
+
+static void kvm_mmu_update_unsync_bitmap(u64 *spte)
+{
+       unsigned int index;
+       struct kvm_mmu_page *sp = page_header(__pa(spte));
+
+       index = spte - sp->spt;
+       __set_bit(index, sp->unsync_child_bitmap);
+       sp->unsync_children = 1;
+}
+
+static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
+{
+       struct kvm_pte_chain *pte_chain;
+       struct hlist_node *node;
+       int i;
+
+       if (!sp->parent_pte)
+               return;
+
+       if (!sp->multimapped) {
+               kvm_mmu_update_unsync_bitmap(sp->parent_pte);
+               return;
+       }
+
+       hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
+               for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
+                       if (!pte_chain->parent_ptes[i])
+                               break;
+                       kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
+               }
+}
+
+static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       sp->unsync_children = 1;
+       kvm_mmu_update_parents_unsync(sp);
+       return 1;
+}
+
+static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
+                                       struct kvm_mmu_page *sp)
+{
+       mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+       kvm_mmu_update_parents_unsync(sp);
+}
+
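
mmu_parent_walk() visits every shadow page that maps sp, following either the single parent_pte or, for multimapped pages, each entry in the pte_chain list, and recurses toward the roots. kvm_mmu_mark_parents_unsync() uses it to set, in each ancestor, the unsync_child_bitmap bit for the spte leading down toward sp and to raise unsync_children, so a later top-down pass can find out-of-sync pages without scanning whole page tables.
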
 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
 {
@@ -877,6 +955,52 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
        return 1;
 }
 
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+}
+
+#define for_each_unsync_children(bitmap, idx)          \
+       for (idx = find_first_bit(bitmap, 512);         \
+            idx < 512;                                 \
+            idx = find_next_bit(bitmap, 512, idx+1))
+
+static int mmu_unsync_walk(struct kvm_mmu_page *sp,
+                          struct kvm_unsync_walk *walker)
+{
+       int i, ret;
+
+       if (!sp->unsync_children)
+               return 0;
+
+       for_each_unsync_children(sp->unsync_child_bitmap, i) {
+               u64 ent = sp->spt[i];
+
+               if (is_shadow_present_pte(ent)) {
+                       struct kvm_mmu_page *child;
+                       child = page_header(ent & PT64_BASE_ADDR_MASK);
+
+                       if (child->unsync_children) {
+                               ret = mmu_unsync_walk(child, walker);
+                               if (ret)
+                                       return ret;
+                               __clear_bit(i, sp->unsync_child_bitmap);
+                       }
+
+                       if (child->unsync) {
+                               ret = walker->entry(child, walker);
+                               __clear_bit(i, sp->unsync_child_bitmap);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+       }
+
+       if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
+               sp->unsync_children = 0;
+
+       return 0;
+}
+
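
mmu_unsync_walk() is the top-down counterpart: starting from a page whose unsync_children flag is set, it visits only the sptes whose bits are set in unsync_child_bitmap, recurses through intermediate pages, hands genuinely unsync leaves to walker->entry(), and clears bits (and eventually unsync_children itself) as subtrees become clean. As a rough user-space model of the bitmap-driven loop, with a plain bool array standing in for the kernel bitmap helpers and all names below purely illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define SPTES_PER_PAGE 512            /* one bit per shadow pte slot */

    static bool unsync_child_bitmap[SPTES_PER_PAGE];

    static int find_next_set(int from)    /* stand-in for find_next_bit() */
    {
            while (from < SPTES_PER_PAGE && !unsync_child_bitmap[from])
                    from++;
            return from;
    }

    int main(void)
    {
            /* pretend the sptes at index 3 and 100 point at unsync children */
            unsync_child_bitmap[3] = unsync_child_bitmap[100] = true;

            /* mirrors for_each_unsync_children(): visit set bits only,
             * clearing each one once the child has been handled */
            for (int i = find_next_set(0); i < SPTES_PER_PAGE; i = find_next_set(i + 1)) {
                    printf("sync child referenced by spte %d\n", i);
                    unsync_child_bitmap[i] = false;
            }
            return 0;
    }
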
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
@@ -897,6 +1021,59 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
+static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       WARN_ON(!sp->unsync);
+       sp->unsync = 0;
+       --kvm->stat.mmu_unsync;
+}
+
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       if (sp->role.glevels != vcpu->arch.mmu.root_level) {
+               kvm_mmu_zap_page(vcpu->kvm, sp);
+               return 1;
+       }
+
+       rmap_write_protect(vcpu->kvm, sp->gfn);
+       if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
+               kvm_mmu_zap_page(vcpu->kvm, sp);
+               return 1;
+       }
+
+       kvm_mmu_flush_tlb(vcpu);
+       kvm_unlink_unsync_page(vcpu->kvm, sp);
+       return 0;
+}
+
+struct sync_walker {
+       struct kvm_vcpu *vcpu;
+       struct kvm_unsync_walk walker;
+};
+
+static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
+{
+       struct sync_walker *sync_walk = container_of(walk, struct sync_walker,
+                                                    walker);
+       struct kvm_vcpu *vcpu = sync_walk->vcpu;
+
+       kvm_sync_page(vcpu, sp);
+       return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock));
+}
+
+static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       struct sync_walker walker = {
+               .walker = { .entry = mmu_sync_fn, },
+               .vcpu = vcpu,
+       };
+
+       while (mmu_unsync_walk(sp, &walker.walker))
+               cond_resched_lock(&vcpu->kvm->mmu_lock);
+}
+
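
kvm_sync_page() restores the invariant for one page: the gfn is write-protected again and the paging-mode ->sync_page() hook reconciles the shadow entries with the guest page table, the page being zapped instead when its role.glevels no longer matches the mmu's root_level or when the resync fails. mmu_sync_children() drives this over every unsync descendant; a nonzero return from the callback aborts the walk, which it uses to drop mmu_lock via cond_resched_lock() and then resume from the bits that are still set. sync_walker also shows the intended way to use struct kvm_unsync_walk: embed it in a caller-specific context and recover that context in the callback with container_of(). A minimal user-space model of that pattern, with all names illustrative and the callback simplified to a single argument:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct walker {                        /* plays the role of kvm_unsync_walk */
            int (*entry)(struct walker *w);
    };

    struct counting_walker {               /* caller-specific wrapper, like sync_walker */
            int visited;
            struct walker walker;
    };

    static int count_fn(struct walker *w)
    {
            struct counting_walker *cw =
                    container_of(w, struct counting_walker, walker);
            cw->visited++;
            return 0;                      /* 0: keep walking */
    }

    int main(void)
    {
            struct counting_walker cw = {
                    .visited = 0,
                    .walker  = { .entry = count_fn },
            };

            /* a real mmu_unsync_walk() would call this once per unsync child */
            cw.walker.entry(&cw.walker);
            printf("visited %d child(ren)\n", cw.visited);
            return 0;
    }
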
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
@@ -910,7 +1087,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
-       struct hlist_node *node;
+       struct hlist_node *node, *tmp;
 
        role.word = 0;
        role.glevels = vcpu->arch.mmu.root_level;
@@ -926,9 +1103,20 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
-       hlist_for_each_entry(sp, node, bucket, hash_link)
-               if (sp->gfn == gfn && sp->role.word == role.word) {
+       hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
+               if (sp->gfn == gfn) {
+                       if (sp->unsync)
+                               if (kvm_sync_page(vcpu, sp))
+                                       continue;
+
+                       if (sp->role.word != role.word)
+                               continue;
+
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+                       if (sp->unsync_children) {
+                               set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+                               kvm_mmu_mark_parents_unsync(vcpu, sp);
+                       }
                        pgprintk("%s: found\n", __func__);
                        return sp;
                }
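
The lookup loop in kvm_mmu_get_page() now has to cope with unsync pages: a cached page that is marked unsync is synced first (and possibly zapped, which is why the walk switched to hlist_for_each_entry_safe), the role comparison only happens afterwards, and if the page being attached still has unsync children, KVM_REQ_MMU_SYNC is raised and the new parent chain is marked, so the children get resynchronized by the request handler (outside this file) before the guest runs on this root again.
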
@@ -940,8 +1128,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
-       if (!metaphysical)
+       if (!metaphysical) {
                rmap_write_protect(vcpu->kvm, gfn);
+               account_shadowed(vcpu->kvm, gfn);
+       }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else
@@ -1047,14 +1237,47 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
        }
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+struct zap_walker {
+       struct kvm_unsync_walk walker;
+       struct kvm *kvm;
+       int zapped;
+};
+
+static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
+{
+       struct zap_walker *zap_walk = container_of(walk, struct zap_walker,
+                                                    walker);
+       kvm_mmu_zap_page(zap_walk->kvm, sp);
+       zap_walk->zapped = 1;
+       return 0;
+}
+
+static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       struct zap_walker walker = {
+               .walker = { .entry = mmu_zap_fn, },
+               .kvm = kvm,
+               .zapped = 0,
+       };
+
+       if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+               return 0;
+       mmu_unsync_walk(sp, &walker.walker);
+       return walker.zapped;
+}
+
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
+       int ret;
        ++kvm->stat.mmu_shadow_zapped;
+       ret = mmu_zap_unsync_children(kvm, sp);
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
        kvm_flush_remote_tlbs(kvm);
        if (!sp->role.invalid && !sp->role.metaphysical)
                unaccount_shadowed(kvm, sp->gfn);
+       if (sp->unsync)
+               kvm_unlink_unsync_page(kvm, sp);
        if (!sp->root_count) {
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
@@ -1064,6 +1287,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
                kvm_reload_remote_mmus(kvm);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
+       return ret;
 }
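
kvm_mmu_zap_page() now reports through its return value whether zapping recursed into unsync children via mmu_zap_unsync_children(). Since that recursion can free other pages on the same hash bucket or list, callers that are in the middle of a walk restart it when the return value is nonzero; see kvm_mmu_unprotect_page(), kvm_mmu_pte_write() and kvm_mmu_zap_all() further down.
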
 
 /*
@@ -1116,8 +1340,9 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
                if (sp->gfn == gfn && !sp->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
-                       kvm_mmu_zap_page(kvm, sp);
                        r = 1;
+                       if (kvm_mmu_zap_page(kvm, sp))
+                               n = bucket->first;
                }
        return r;
 }
@@ -1140,6 +1365,20 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
        __set_bit(slot, &sp->slot_bitmap);
 }
 
+static void mmu_convert_notrap(struct kvm_mmu_page *sp)
+{
+       int i;
+       u64 *pt = sp->spt;
+
+       if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
+               return;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+               if (pt[i] == shadow_notrap_nonpresent_pte)
+                       set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+       }
+}
+
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct page *page;
@@ -1154,10 +1393,52 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
        return page;
 }
 
+static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       unsigned index;
+       struct hlist_head *bucket;
+       struct kvm_mmu_page *s;
+       struct hlist_node *node, *n;
+
+       index = kvm_page_table_hashfn(sp->gfn);
+       bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+       /* don't unsync if pagetable is shadowed with multiple roles */
+       hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+               if (s->gfn != sp->gfn || s->role.metaphysical)
+                       continue;
+               if (s->role.word != sp->role.word)
+                       return 1;
+       }
+       kvm_mmu_mark_parents_unsync(vcpu, sp);
+       ++vcpu->kvm->stat.mmu_unsync;
+       sp->unsync = 1;
+       mmu_convert_notrap(sp);
+       return 0;
+}
+
+static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                 bool can_unsync)
+{
+       struct kvm_mmu_page *shadow;
+
+       shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+       if (shadow) {
+               if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
+                       return 1;
+               if (shadow->unsync)
+                       return 0;
+               if (can_unsync && oos_shadow)
+                       return kvm_unsync_page(vcpu, shadow);
+               return 1;
+       }
+       return 0;
+}
+
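
Together these two helpers implement the out-of-sync policy: a gfn that is shadowed as a page table is normally kept write-protected, but a last-level table that is shadowed under a single role may instead be left writable and marked unsync, provided the caller passed can_unsync and the oos_shadow parameter is enabled. Marking a page unsync bumps the mmu_unsync statistic, flags its parents, and converts its notrap sptes back to trap ones so that accesses through entries the guest may have since filled in still fault into KVM.
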
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   gfn_t gfn, pfn_t pfn, bool speculative)
+                   gfn_t gfn, pfn_t pfn, bool speculative,
+                   bool can_unsync)
 {
        u64 spte;
        int ret = 0;
@@ -1184,7 +1465,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
        if ((pte_access & ACC_WRITE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
-               struct kvm_mmu_page *shadow;
 
                if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
                        ret = 1;
@@ -1194,8 +1474,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
                spte |= PT_WRITABLE_MASK;
 
-               shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-               if (shadow) {
+               if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __func__, gfn);
                        ret = 1;
@@ -1213,7 +1492,6 @@ set_pte:
        return ret;
 }
 
-
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
@@ -1251,7 +1529,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, gfn, pfn, speculative)) {
+                     dirty, largepage, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1471,6 +1749,37 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
+static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_mmu_page *sp;
+
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
+               sp = page_header(root);
+               mmu_sync_children(vcpu, sp);
+               return;
+       }
+       for (i = 0; i < 4; ++i) {
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+               if (root) {
+                       root &= PT64_BASE_ADDR_MASK;
+                       sp = page_header(root);
+                       mmu_sync_children(vcpu, sp);
+               }
+       }
+}
+
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
+{
+       spin_lock(&vcpu->kvm->mmu_lock);
+       mmu_sync_roots(vcpu);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
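
mmu_sync_roots() walks down from every active root, either the single root in 64-bit shadow mode or each of the four PAE roots, and resynchronizes any unsync descendants via mmu_sync_children(). It is called with mmu_lock already held from kvm_mmu_load() below, while the exported kvm_mmu_sync_roots() takes the lock itself so it can serve the KVM_REQ_MMU_SYNC requests raised elsewhere in this patch; the caller that processes those requests lives outside this file.
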
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
@@ -1554,6 +1863,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        context->free = nonpaging_free;
        context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
+       context->invlpg = nonpaging_invlpg;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -1602,6 +1912,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->prefetch_page = paging64_prefetch_page;
        context->sync_page = paging64_sync_page;
+       context->invlpg = paging64_invlpg;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
@@ -1624,6 +1935,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
        context->free = paging_free;
        context->prefetch_page = paging32_prefetch_page;
        context->sync_page = paging32_sync_page;
+       context->invlpg = paging32_invlpg;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -1644,6 +1956,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->free = nonpaging_free;
        context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
+       context->invlpg = nonpaging_invlpg;
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
 
@@ -1715,6 +2028,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
        mmu_alloc_roots(vcpu);
+       mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
@@ -1921,7 +2235,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
-                       kvm_mmu_zap_page(vcpu->kvm, sp);
+                       if (kvm_mmu_zap_page(vcpu->kvm, sp))
+                               n = bucket->first;
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
@@ -2035,6 +2350,16 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       spin_lock(&vcpu->kvm->mmu_lock);
+       vcpu->arch.mmu.invlpg(vcpu, gva);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_mmu_flush_tlb(vcpu);
+       ++vcpu->stat.invlpg;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
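
kvm_mmu_invlpg() routes the guest's INVLPG into the shadow MMU: under mmu_lock the paging-mode ->invlpg() hook can fix up the spte for that virtual address, after which the TLB is flushed and the invlpg statistic is incremented. The nonpaging and TDP contexts install the empty nonpaging_invlpg(), since they have no out-of-sync shadow pages to repair.
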
+
 void kvm_enable_tdp(void)
 {
        tdp_enabled = true;
@@ -2145,7 +2470,9 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 
        spin_lock(&kvm->mmu_lock);
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-               kvm_mmu_zap_page(kvm, sp);
+               if (kvm_mmu_zap_page(kvm, sp))
+                       node = container_of(kvm->arch.active_mmu_pages.next,
+                                           struct kvm_mmu_page, link);
        spin_unlock(&kvm->mmu_lock);
 
        kvm_flush_remote_tlbs(kvm);
@@ -2307,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->tlb_flush(vcpu);
+       set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
        return 1;
 }
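
A paravirtual TLB flush likewise sets KVM_REQ_MMU_SYNC: a guest flushing its TLB is precisely the point at which writes it made to out-of-sync page tables must become visible, so the shadow pages are queued for resynchronization (presumably handled on the next guest entry, outside this file) in addition to the immediate hardware TLB flush.
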