linux-2.6-omap-h63xx.git: arch/x86/kvm/mmu.c (blob 731e6fe9cb078bea1e2655d03f70c5ed81850b77)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "mmu.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36
37 /*
38  * When set to true, this enables Two-Dimensional Paging (TDP), where the
39  * hardware walks two page tables:
40  * 1. the guest-virtual to guest-physical translation
41  * 2. while doing 1., the guest-physical to host-physical translation
42  * If the hardware supports this, we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 0;
70 module_param(dbg, bool, 0644);
71 #endif
72
73 #ifndef MMU_DEBUG
74 #define ASSERT(x) do { } while (0)
75 #else
76 #define ASSERT(x)                                                       \
77         if (!(x)) {                                                     \
78                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
79                        __FILE__, __LINE__, #x);                         \
80         }
81 #endif
82
83 #define PT_FIRST_AVAIL_BITS_SHIFT 9
84 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
85
86 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
87
88 #define PT64_LEVEL_BITS 9
89
90 #define PT64_LEVEL_SHIFT(level) \
91                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
92
93 #define PT64_LEVEL_MASK(level) \
94                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
95
96 #define PT64_INDEX(address, level)\
97         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
98
99
100 #define PT32_LEVEL_BITS 10
101
102 #define PT32_LEVEL_SHIFT(level) \
103                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
104
105 #define PT32_LEVEL_MASK(level) \
106                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
107
108 #define PT32_INDEX(address, level)\
109         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
110
111
112 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
113 #define PT64_DIR_BASE_ADDR_MASK \
114         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
115
116 #define PT32_BASE_ADDR_MASK PAGE_MASK
117 #define PT32_DIR_BASE_ADDR_MASK \
118         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
119
120 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
121                         | PT64_NX_MASK)
122
123 #define PFERR_PRESENT_MASK (1U << 0)
124 #define PFERR_WRITE_MASK (1U << 1)
125 #define PFERR_USER_MASK (1U << 2)
126 #define PFERR_FETCH_MASK (1U << 4)
127
128 #define PT_DIRECTORY_LEVEL 2
129 #define PT_PAGE_TABLE_LEVEL 1
130
131 #define RMAP_EXT 4
132
133 #define ACC_EXEC_MASK    1
134 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
135 #define ACC_USER_MASK    PT_USER_MASK
136 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
137
138 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
139
140 struct kvm_rmap_desc {
141         u64 *shadow_ptes[RMAP_EXT];
142         struct kvm_rmap_desc *more;
143 };
144
145 struct kvm_shadow_walk {
146         int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
147                      u64 addr, u64 *spte, int level);
148 };
149
150 static struct kmem_cache *pte_chain_cache;
151 static struct kmem_cache *rmap_desc_cache;
152 static struct kmem_cache *mmu_page_header_cache;
153
154 static u64 __read_mostly shadow_trap_nonpresent_pte;
155 static u64 __read_mostly shadow_notrap_nonpresent_pte;
156 static u64 __read_mostly shadow_base_present_pte;
157 static u64 __read_mostly shadow_nx_mask;
158 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
159 static u64 __read_mostly shadow_user_mask;
160 static u64 __read_mostly shadow_accessed_mask;
161 static u64 __read_mostly shadow_dirty_mask;
162
163 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
164 {
165         shadow_trap_nonpresent_pte = trap_pte;
166         shadow_notrap_nonpresent_pte = notrap_pte;
167 }
168 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
169
170 void kvm_mmu_set_base_ptes(u64 base_pte)
171 {
172         shadow_base_present_pte = base_pte;
173 }
174 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
175
176 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
177                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
178 {
179         shadow_user_mask = user_mask;
180         shadow_accessed_mask = accessed_mask;
181         shadow_dirty_mask = dirty_mask;
182         shadow_nx_mask = nx_mask;
183         shadow_x_mask = x_mask;
184 }
185 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
186
187 static int is_write_protection(struct kvm_vcpu *vcpu)
188 {
189         return vcpu->arch.cr0 & X86_CR0_WP;
190 }
191
192 static int is_cpuid_PSE36(void)
193 {
194         return 1;
195 }
196
197 static int is_nx(struct kvm_vcpu *vcpu)
198 {
199         return vcpu->arch.shadow_efer & EFER_NX;
200 }
201
202 static int is_present_pte(unsigned long pte)
203 {
204         return pte & PT_PRESENT_MASK;
205 }
206
207 static int is_shadow_present_pte(u64 pte)
208 {
209         return pte != shadow_trap_nonpresent_pte
210                 && pte != shadow_notrap_nonpresent_pte;
211 }
212
213 static int is_large_pte(u64 pte)
214 {
215         return pte & PT_PAGE_SIZE_MASK;
216 }
217
218 static int is_writeble_pte(unsigned long pte)
219 {
220         return pte & PT_WRITABLE_MASK;
221 }
222
223 static int is_dirty_pte(unsigned long pte)
224 {
225         return pte & shadow_dirty_mask;
226 }
227
228 static int is_rmap_pte(u64 pte)
229 {
230         return is_shadow_present_pte(pte);
231 }
232
233 static pfn_t spte_to_pfn(u64 pte)
234 {
235         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
236 }
237
238 static gfn_t pse36_gfn_delta(u32 gpte)
239 {
240         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
241
242         return (gpte & PT32_DIR_PSE36_MASK) << shift;
243 }
244
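/*
 * Update a shadow pte with a single atomic 64-bit store so that a
 * concurrent hardware walk never sees a torn entry, even on 32-bit hosts.
 */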
245 static void set_shadow_pte(u64 *sptep, u64 spte)
246 {
247 #ifdef CONFIG_X86_64
248         set_64bit((unsigned long *)sptep, spte);
249 #else
250         set_64bit((unsigned long long *)sptep, spte);
251 #endif
252 }
253
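/*
 * The per-vcpu memory caches below are filled by mmu_topup_memory_caches()
 * before mmu_lock is taken, so the fault paths can later pop pre-allocated
 * objects without having to allocate (and possibly sleep) under the lock.
 */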
254 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
255                                   struct kmem_cache *base_cache, int min)
256 {
257         void *obj;
258
259         if (cache->nobjs >= min)
260                 return 0;
261         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
262                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
263                 if (!obj)
264                         return -ENOMEM;
265                 cache->objects[cache->nobjs++] = obj;
266         }
267         return 0;
268 }
269
270 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
271 {
272         while (mc->nobjs)
273                 kfree(mc->objects[--mc->nobjs]);
274 }
275
276 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
277                                        int min)
278 {
279         struct page *page;
280
281         if (cache->nobjs >= min)
282                 return 0;
283         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
284                 page = alloc_page(GFP_KERNEL);
285                 if (!page)
286                         return -ENOMEM;
287                 set_page_private(page, 0);
288                 cache->objects[cache->nobjs++] = page_address(page);
289         }
290         return 0;
291 }
292
293 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
294 {
295         while (mc->nobjs)
296                 free_page((unsigned long)mc->objects[--mc->nobjs]);
297 }
298
299 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
300 {
301         int r;
302
303         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
304                                    pte_chain_cache, 4);
305         if (r)
306                 goto out;
307         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
308                                    rmap_desc_cache, 1);
309         if (r)
310                 goto out;
311         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
312         if (r)
313                 goto out;
314         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
315                                    mmu_page_header_cache, 4);
316 out:
317         return r;
318 }
319
320 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
321 {
322         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
323         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
324         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
325         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
326 }
327
328 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
329                                     size_t size)
330 {
331         void *p;
332
333         BUG_ON(!mc->nobjs);
334         p = mc->objects[--mc->nobjs];
335         memset(p, 0, size);
336         return p;
337 }
338
339 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
340 {
341         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
342                                       sizeof(struct kvm_pte_chain));
343 }
344
345 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
346 {
347         kfree(pc);
348 }
349
350 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
351 {
352         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
353                                       sizeof(struct kvm_rmap_desc));
354 }
355
356 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
357 {
358         kfree(rd);
359 }
360
361 /*
362  * Return the pointer to the largepage write count for a given
363  * gfn, handling slots that are not large page aligned.
364  */
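/*
 * For example, with KVM_PAGES_PER_HPAGE == 512 (2MB huge pages), a slot
 * with an unaligned base_gfn of 0x9ff and gfn 0xc37 gives
 * idx = (0xc37 / 512) - (0x9ff / 512) = 6 - 4 = 2.
 */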
365 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
366 {
367         unsigned long idx;
368
369         idx = (gfn / KVM_PAGES_PER_HPAGE) -
370               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
371         return &slot->lpage_info[idx].write_count;
372 }
373
374 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
375 {
376         int *write_count;
377
378         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
379         *write_count += 1;
380 }
381
382 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
383 {
384         int *write_count;
385
386         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
387         *write_count -= 1;
388         WARN_ON(*write_count < 0);
389 }
390
391 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
392 {
393         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
394         int *largepage_idx;
395
396         if (slot) {
397                 largepage_idx = slot_largepage_idx(gfn, slot);
398                 return *largepage_idx;
399         }
400
401         return 1;
402 }
403
404 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
405 {
406         struct vm_area_struct *vma;
407         unsigned long addr;
408         int ret = 0;
409
410         addr = gfn_to_hva(kvm, gfn);
411         if (kvm_is_error_hva(addr))
412                 return ret;
413
414         down_read(&current->mm->mmap_sem);
415         vma = find_vma(current->mm, addr);
416         if (vma && is_vm_hugetlb_page(vma))
417                 ret = 1;
418         up_read(&current->mm->mmap_sem);
419
420         return ret;
421 }
422
423 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
424 {
425         struct kvm_memory_slot *slot;
426
427         if (has_wrprotected_page(vcpu->kvm, large_gfn))
428                 return 0;
429
430         if (!host_largepage_backed(vcpu->kvm, large_gfn))
431                 return 0;
432
433         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
434         if (slot && slot->dirty_bitmap)
435                 return 0;
436
437         return 1;
438 }
439
440 /*
441  * Take gfn and return the reverse mapping to it.
442  * Note: gfn must be unaliased before this function gets called
443  */
444
445 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
446 {
447         struct kvm_memory_slot *slot;
448         unsigned long idx;
449
450         slot = gfn_to_memslot(kvm, gfn);
451         if (!lpage)
452                 return &slot->rmap[gfn - slot->base_gfn];
453
454         idx = (gfn / KVM_PAGES_PER_HPAGE) -
455               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
456
457         return &slot->lpage_info[idx].rmap_pde;
458 }
459
460 /*
461  * Reverse mapping data structures:
462  *
463  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
464  * that points to page_address(page).
465  *
466  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
467  * containing more mappings.
468  */
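/*
 * That is, a gfn's rmap slot moves through three states:
 *
 *   no mappings:    *rmapp == 0
 *   one mapping:    *rmapp == (unsigned long)spte         (bit zero clear)
 *   many mappings:  *rmapp == (unsigned long)desc | 1     (bit zero set)
 *
 * where desc is a kvm_rmap_desc holding up to RMAP_EXT shadow ptes plus a
 * pointer to the next descriptor in the chain.
 */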
469 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
470 {
471         struct kvm_mmu_page *sp;
472         struct kvm_rmap_desc *desc;
473         unsigned long *rmapp;
474         int i;
475
476         if (!is_rmap_pte(*spte))
477                 return;
478         gfn = unalias_gfn(vcpu->kvm, gfn);
479         sp = page_header(__pa(spte));
480         sp->gfns[spte - sp->spt] = gfn;
481         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
482         if (!*rmapp) {
483                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
484                 *rmapp = (unsigned long)spte;
485         } else if (!(*rmapp & 1)) {
486                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
487                 desc = mmu_alloc_rmap_desc(vcpu);
488                 desc->shadow_ptes[0] = (u64 *)*rmapp;
489                 desc->shadow_ptes[1] = spte;
490                 *rmapp = (unsigned long)desc | 1;
491         } else {
492                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
493                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
494                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
495                         desc = desc->more;
496                 if (desc->shadow_ptes[RMAP_EXT-1]) {
497                         desc->more = mmu_alloc_rmap_desc(vcpu);
498                         desc = desc->more;
499                 }
500                 for (i = 0; desc->shadow_ptes[i]; ++i)
501                         ;
502                 desc->shadow_ptes[i] = spte;
503         }
504 }
505
506 static void rmap_desc_remove_entry(unsigned long *rmapp,
507                                    struct kvm_rmap_desc *desc,
508                                    int i,
509                                    struct kvm_rmap_desc *prev_desc)
510 {
511         int j;
512
513         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
514                 ;
515         desc->shadow_ptes[i] = desc->shadow_ptes[j];
516         desc->shadow_ptes[j] = NULL;
517         if (j != 0)
518                 return;
519         if (!prev_desc && !desc->more)
520                 *rmapp = (unsigned long)desc->shadow_ptes[0];
521         else
522                 if (prev_desc)
523                         prev_desc->more = desc->more;
524                 else
525                         *rmapp = (unsigned long)desc->more | 1;
526         mmu_free_rmap_desc(desc);
527 }
528
529 static void rmap_remove(struct kvm *kvm, u64 *spte)
530 {
531         struct kvm_rmap_desc *desc;
532         struct kvm_rmap_desc *prev_desc;
533         struct kvm_mmu_page *sp;
534         pfn_t pfn;
535         unsigned long *rmapp;
536         int i;
537
538         if (!is_rmap_pte(*spte))
539                 return;
540         sp = page_header(__pa(spte));
541         pfn = spte_to_pfn(*spte);
542         if (*spte & shadow_accessed_mask)
543                 kvm_set_pfn_accessed(pfn);
544         if (is_writeble_pte(*spte))
545                 kvm_release_pfn_dirty(pfn);
546         else
547                 kvm_release_pfn_clean(pfn);
548         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
549         if (!*rmapp) {
550                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
551                 BUG();
552         } else if (!(*rmapp & 1)) {
553                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
554                 if ((u64 *)*rmapp != spte) {
555                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
556                                spte, *spte);
557                         BUG();
558                 }
559                 *rmapp = 0;
560         } else {
561                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
562                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
563                 prev_desc = NULL;
564                 while (desc) {
565                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
566                                 if (desc->shadow_ptes[i] == spte) {
567                                         rmap_desc_remove_entry(rmapp,
568                                                                desc, i,
569                                                                prev_desc);
570                                         return;
571                                 }
572                         prev_desc = desc;
573                         desc = desc->more;
574                 }
575                 BUG();
576         }
577 }
578
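/*
 * Iterator over the sptes reachable from an rmap slot: pass spte == NULL to
 * get the first entry, then pass the previously returned pointer to get the
 * next one.  Returns NULL when the chain is exhausted.
 */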
579 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
580 {
581         struct kvm_rmap_desc *desc;
582         struct kvm_rmap_desc *prev_desc;
583         u64 *prev_spte;
584         int i;
585
586         if (!*rmapp)
587                 return NULL;
588         else if (!(*rmapp & 1)) {
589                 if (!spte)
590                         return (u64 *)*rmapp;
591                 return NULL;
592         }
593         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
594         prev_desc = NULL;
595         prev_spte = NULL;
596         while (desc) {
597                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
598                         if (prev_spte == spte)
599                                 return desc->shadow_ptes[i];
600                         prev_spte = desc->shadow_ptes[i];
601                 }
602                 desc = desc->more;
603         }
604         return NULL;
605 }
606
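/*
 * Remove write access from every spte mapping gfn.  Writable huge-page
 * mappings are zapped rather than downgraded, and remote TLBs are flushed
 * if any entry was changed.
 */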
607 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
608 {
609         unsigned long *rmapp;
610         u64 *spte;
611         int write_protected = 0;
612
613         gfn = unalias_gfn(kvm, gfn);
614         rmapp = gfn_to_rmap(kvm, gfn, 0);
615
616         spte = rmap_next(kvm, rmapp, NULL);
617         while (spte) {
618                 BUG_ON(!spte);
619                 BUG_ON(!(*spte & PT_PRESENT_MASK));
620                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
621                 if (is_writeble_pte(*spte)) {
622                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
623                         write_protected = 1;
624                 }
625                 spte = rmap_next(kvm, rmapp, spte);
626         }
627         if (write_protected) {
628                 pfn_t pfn;
629
630                 spte = rmap_next(kvm, rmapp, NULL);
631                 pfn = spte_to_pfn(*spte);
632                 kvm_set_pfn_dirty(pfn);
633         }
634
635         /* check for huge page mappings */
636         rmapp = gfn_to_rmap(kvm, gfn, 1);
637         spte = rmap_next(kvm, rmapp, NULL);
638         while (spte) {
639                 BUG_ON(!spte);
640                 BUG_ON(!(*spte & PT_PRESENT_MASK));
641                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
642                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
643                 if (is_writeble_pte(*spte)) {
644                         rmap_remove(kvm, spte);
645                         --kvm->stat.lpages;
646                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
647                         spte = NULL;
648                         write_protected = 1;
649                 }
650                 spte = rmap_next(kvm, rmapp, spte);
651         }
652
653         if (write_protected)
654                 kvm_flush_remote_tlbs(kvm);
655
656         account_shadowed(kvm, gfn);
657 }
658
659 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
660 {
661         u64 *spte;
662         int need_tlb_flush = 0;
663
664         while ((spte = rmap_next(kvm, rmapp, NULL))) {
665                 BUG_ON(!(*spte & PT_PRESENT_MASK));
666                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
667                 rmap_remove(kvm, spte);
668                 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
669                 need_tlb_flush = 1;
670         }
671         return need_tlb_flush;
672 }
673
674 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
675                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
676 {
677         int i;
678         int retval = 0;
679
680         /*
681          * If mmap_sem isn't taken, we can walk the memslots with only
682          * the mmu_lock held, by skipping over slots with userspace_addr == 0.
683          */
684         for (i = 0; i < kvm->nmemslots; i++) {
685                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
686                 unsigned long start = memslot->userspace_addr;
687                 unsigned long end;
688
689                 /* mmu_lock protects userspace_addr */
690                 if (!start)
691                         continue;
692
693                 end = start + (memslot->npages << PAGE_SHIFT);
694                 if (hva >= start && hva < end) {
695                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
696                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
697                         retval |= handler(kvm,
698                                           &memslot->lpage_info[
699                                                   gfn_offset /
700                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
701                 }
702         }
703
704         return retval;
705 }
706
707 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
708 {
709         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
710 }
711
712 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
713 {
714         u64 *spte;
715         int young = 0;
716
717         /* always return old for EPT */
718         if (!shadow_accessed_mask)
719                 return 0;
720
721         spte = rmap_next(kvm, rmapp, NULL);
722         while (spte) {
723                 int _young;
724                 u64 _spte = *spte;
725                 BUG_ON(!(_spte & PT_PRESENT_MASK));
726                 _young = _spte & PT_ACCESSED_MASK;
727                 if (_young) {
728                         young = 1;
729                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
730                 }
731                 spte = rmap_next(kvm, rmapp, spte);
732         }
733         return young;
734 }
735
736 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
737 {
738         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
739 }
740
741 #ifdef MMU_DEBUG
742 static int is_empty_shadow_page(u64 *spt)
743 {
744         u64 *pos;
745         u64 *end;
746
747         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
748                 if (is_shadow_present_pte(*pos)) {
749                         printk(KERN_ERR "%s: %p %llx\n", __func__,
750                                pos, *pos);
751                         return 0;
752                 }
753         return 1;
754 }
755 #endif
756
757 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
758 {
759         ASSERT(is_empty_shadow_page(sp->spt));
760         list_del(&sp->link);
761         __free_page(virt_to_page(sp->spt));
762         __free_page(virt_to_page(sp->gfns));
763         kfree(sp);
764         ++kvm->arch.n_free_mmu_pages;
765 }
766
767 static unsigned kvm_page_table_hashfn(gfn_t gfn)
768 {
769         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
770 }
771
772 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
773                                                u64 *parent_pte)
774 {
775         struct kvm_mmu_page *sp;
776
777         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
778         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
779         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
780         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
781         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
782         ASSERT(is_empty_shadow_page(sp->spt));
783         sp->slot_bitmap = 0;
784         sp->multimapped = 0;
785         sp->parent_pte = parent_pte;
786         --vcpu->kvm->arch.n_free_mmu_pages;
787         return sp;
788 }
789
790 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
791                                     struct kvm_mmu_page *sp, u64 *parent_pte)
792 {
793         struct kvm_pte_chain *pte_chain;
794         struct hlist_node *node;
795         int i;
796
797         if (!parent_pte)
798                 return;
799         if (!sp->multimapped) {
800                 u64 *old = sp->parent_pte;
801
802                 if (!old) {
803                         sp->parent_pte = parent_pte;
804                         return;
805                 }
806                 sp->multimapped = 1;
807                 pte_chain = mmu_alloc_pte_chain(vcpu);
808                 INIT_HLIST_HEAD(&sp->parent_ptes);
809                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
810                 pte_chain->parent_ptes[0] = old;
811         }
812         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
813                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
814                         continue;
815                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
816                         if (!pte_chain->parent_ptes[i]) {
817                                 pte_chain->parent_ptes[i] = parent_pte;
818                                 return;
819                         }
820         }
821         pte_chain = mmu_alloc_pte_chain(vcpu);
822         BUG_ON(!pte_chain);
823         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
824         pte_chain->parent_ptes[0] = parent_pte;
825 }
826
827 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
828                                        u64 *parent_pte)
829 {
830         struct kvm_pte_chain *pte_chain;
831         struct hlist_node *node;
832         int i;
833
834         if (!sp->multimapped) {
835                 BUG_ON(sp->parent_pte != parent_pte);
836                 sp->parent_pte = NULL;
837                 return;
838         }
839         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
840                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
841                         if (!pte_chain->parent_ptes[i])
842                                 break;
843                         if (pte_chain->parent_ptes[i] != parent_pte)
844                                 continue;
845                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
846                                 && pte_chain->parent_ptes[i + 1]) {
847                                 pte_chain->parent_ptes[i]
848                                         = pte_chain->parent_ptes[i + 1];
849                                 ++i;
850                         }
851                         pte_chain->parent_ptes[i] = NULL;
852                         if (i == 0) {
853                                 hlist_del(&pte_chain->link);
854                                 mmu_free_pte_chain(pte_chain);
855                                 if (hlist_empty(&sp->parent_ptes)) {
856                                         sp->multimapped = 0;
857                                         sp->parent_pte = NULL;
858                                 }
859                         }
860                         return;
861                 }
862         BUG();
863 }
864
865 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
866                                     struct kvm_mmu_page *sp)
867 {
868         int i;
869
870         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
871                 sp->spt[i] = shadow_trap_nonpresent_pte;
872 }
873
874 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
875 {
876         unsigned index;
877         struct hlist_head *bucket;
878         struct kvm_mmu_page *sp;
879         struct hlist_node *node;
880
881         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
882         index = kvm_page_table_hashfn(gfn);
883         bucket = &kvm->arch.mmu_page_hash[index];
884         hlist_for_each_entry(sp, node, bucket, hash_link)
885                 if (sp->gfn == gfn && !sp->role.metaphysical
886                     && !sp->role.invalid) {
887                         pgprintk("%s: found role %x\n",
888                                  __func__, sp->role.word);
889                         return sp;
890                 }
891         return NULL;
892 }
893
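/*
 * Look up a shadow page for (gfn, role) in the hash table, creating and
 * hashing a new one if none exists.  A newly created non-metaphysical page
 * write-protects the guest page table it shadows so that guest writes to it
 * can be intercepted.
 */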
894 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
895                                              gfn_t gfn,
896                                              gva_t gaddr,
897                                              unsigned level,
898                                              int metaphysical,
899                                              unsigned access,
900                                              u64 *parent_pte)
901 {
902         union kvm_mmu_page_role role;
903         unsigned index;
904         unsigned quadrant;
905         struct hlist_head *bucket;
906         struct kvm_mmu_page *sp;
907         struct hlist_node *node;
908
909         role.word = 0;
910         role.glevels = vcpu->arch.mmu.root_level;
911         role.level = level;
912         role.metaphysical = metaphysical;
913         role.access = access;
914         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
915                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
916                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
917                 role.quadrant = quadrant;
918         }
919         pgprintk("%s: looking gfn %lx role %x\n", __func__,
920                  gfn, role.word);
921         index = kvm_page_table_hashfn(gfn);
922         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
923         hlist_for_each_entry(sp, node, bucket, hash_link)
924                 if (sp->gfn == gfn && sp->role.word == role.word) {
925                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
926                         pgprintk("%s: found\n", __func__);
927                         return sp;
928                 }
929         ++vcpu->kvm->stat.mmu_cache_miss;
930         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
931         if (!sp)
932                 return sp;
933         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
934         sp->gfn = gfn;
935         sp->role = role;
936         hlist_add_head(&sp->hash_link, bucket);
937         if (!metaphysical)
938                 rmap_write_protect(vcpu->kvm, gfn);
939         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
940                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
941         else
942                 nonpaging_prefetch_page(vcpu, sp);
943         return sp;
944 }
945
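/*
 * Walk the shadow page table for addr from the root down, invoking
 * walker->entry() at each level; a nonzero return value from the callback
 * stops the walk and is propagated to the caller.  When the shadow root
 * level is PT32E_ROOT_LEVEL, the top level is resolved through pae_root[]
 * before the loop starts.
 */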
946 static int walk_shadow(struct kvm_shadow_walk *walker,
947                        struct kvm_vcpu *vcpu, u64 addr)
948 {
949         hpa_t shadow_addr;
950         int level;
951         int r;
952         u64 *sptep;
953         unsigned index;
954
955         shadow_addr = vcpu->arch.mmu.root_hpa;
956         level = vcpu->arch.mmu.shadow_root_level;
957         if (level == PT32E_ROOT_LEVEL) {
958                 shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
959                 shadow_addr &= PT64_BASE_ADDR_MASK;
960                 --level;
961         }
962
963         while (level >= PT_PAGE_TABLE_LEVEL) {
964                 index = SHADOW_PT_INDEX(addr, level);
965                 sptep = ((u64 *)__va(shadow_addr)) + index;
966                 r = walker->entry(walker, vcpu, addr, sptep, level);
967                 if (r)
968                         return r;
969                 shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
970                 --level;
971         }
972         return 0;
973 }
974
975 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
976                                          struct kvm_mmu_page *sp)
977 {
978         unsigned i;
979         u64 *pt;
980         u64 ent;
981
982         pt = sp->spt;
983
984         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
985                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
986                         if (is_shadow_present_pte(pt[i]))
987                                 rmap_remove(kvm, &pt[i]);
988                         pt[i] = shadow_trap_nonpresent_pte;
989                 }
990                 return;
991         }
992
993         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
994                 ent = pt[i];
995
996                 if (is_shadow_present_pte(ent)) {
997                         if (!is_large_pte(ent)) {
998                                 ent &= PT64_BASE_ADDR_MASK;
999                                 mmu_page_remove_parent_pte(page_header(ent),
1000                                                            &pt[i]);
1001                         } else {
1002                                 --kvm->stat.lpages;
1003                                 rmap_remove(kvm, &pt[i]);
1004                         }
1005                 }
1006                 pt[i] = shadow_trap_nonpresent_pte;
1007         }
1008 }
1009
1010 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1011 {
1012         mmu_page_remove_parent_pte(sp, parent_pte);
1013 }
1014
1015 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1016 {
1017         int i;
1018
1019         for (i = 0; i < KVM_MAX_VCPUS; ++i)
1020                 if (kvm->vcpus[i])
1021                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
1022 }
1023
1024 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1025 {
1026         u64 *parent_pte;
1027
1028         while (sp->multimapped || sp->parent_pte) {
1029                 if (!sp->multimapped)
1030                         parent_pte = sp->parent_pte;
1031                 else {
1032                         struct kvm_pte_chain *chain;
1033
1034                         chain = container_of(sp->parent_ptes.first,
1035                                              struct kvm_pte_chain, link);
1036                         parent_pte = chain->parent_ptes[0];
1037                 }
1038                 BUG_ON(!parent_pte);
1039                 kvm_mmu_put_page(sp, parent_pte);
1040                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
1041         }
1042 }
1043
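/*
 * Tear down a shadow page: unlink all of its children and all parent ptes
 * pointing at it.  Pages not referenced as roots are freed immediately;
 * pages still in use as roots are only marked invalid, and the remote MMUs
 * are reloaded so the last root reference gets dropped.
 */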
1044 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1045 {
1046         ++kvm->stat.mmu_shadow_zapped;
1047         kvm_mmu_page_unlink_children(kvm, sp);
1048         kvm_mmu_unlink_parents(kvm, sp);
1049         kvm_flush_remote_tlbs(kvm);
1050         if (!sp->role.invalid && !sp->role.metaphysical)
1051                 unaccount_shadowed(kvm, sp->gfn);
1052         if (!sp->root_count) {
1053                 hlist_del(&sp->hash_link);
1054                 kvm_mmu_free_page(kvm, sp);
1055         } else {
1056                 sp->role.invalid = 1;
1057                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1058                 kvm_reload_remote_mmus(kvm);
1059         }
1060         kvm_mmu_reset_last_pte_updated(kvm);
1061 }
1062
1063 /*
1064  * Changing the number of mmu pages allocated to the vm
1065  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
1066  */
1067 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1068 {
1069         /*
1070          * If we set the number of mmu pages to be smaller than the
1071          * number of active pages, we must free some mmu pages before we
1072          * change the value.
1073          */
1074
1075         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
1076             kvm_nr_mmu_pages) {
1077                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
1078                                        - kvm->arch.n_free_mmu_pages;
1079
1080                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
1081                         struct kvm_mmu_page *page;
1082
1083                         page = container_of(kvm->arch.active_mmu_pages.prev,
1084                                             struct kvm_mmu_page, link);
1085                         kvm_mmu_zap_page(kvm, page);
1086                         n_used_mmu_pages--;
1087                 }
1088                 kvm->arch.n_free_mmu_pages = 0;
1089         }
1090         else
1091                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1092                                          - kvm->arch.n_alloc_mmu_pages;
1093
1094         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1095 }
1096
1097 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1098 {
1099         unsigned index;
1100         struct hlist_head *bucket;
1101         struct kvm_mmu_page *sp;
1102         struct hlist_node *node, *n;
1103         int r;
1104
1105         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1106         r = 0;
1107         index = kvm_page_table_hashfn(gfn);
1108         bucket = &kvm->arch.mmu_page_hash[index];
1109         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1110                 if (sp->gfn == gfn && !sp->role.metaphysical) {
1111                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1112                                  sp->role.word);
1113                         kvm_mmu_zap_page(kvm, sp);
1114                         r = 1;
1115                 }
1116         return r;
1117 }
1118
1119 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1120 {
1121         struct kvm_mmu_page *sp;
1122
1123         while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
1124                 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
1125                 kvm_mmu_zap_page(kvm, sp);
1126         }
1127 }
1128
1129 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1130 {
1131         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1132         struct kvm_mmu_page *sp = page_header(__pa(pte));
1133
1134         __set_bit(slot, &sp->slot_bitmap);
1135 }
1136
1137 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1138 {
1139         struct page *page;
1140
1141         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1142
1143         if (gpa == UNMAPPED_GVA)
1144                 return NULL;
1145
1146         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1147
1148         return page;
1149 }
1150
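/*
 * Compute and install a single shadow pte.  Returns nonzero when write
 * access had to be withheld (the gfn is itself shadowed, or a huge page
 * would overlap a write-protected page); the caller then flushes the TLB.
 */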
1151 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1152                     unsigned pte_access, int user_fault,
1153                     int write_fault, int dirty, int largepage,
1154                     gfn_t gfn, pfn_t pfn, bool speculative)
1155 {
1156         u64 spte;
1157         int ret = 0;
1158         /*
1159          * We don't set the accessed bit, since we sometimes want to see
1160          * whether the guest actually used the pte (in order to detect
1161          * demand paging).
1162          */
1163         spte = shadow_base_present_pte | shadow_dirty_mask;
1164         if (!speculative)
1165                 spte |= shadow_accessed_mask;
1166         if (!dirty)
1167                 pte_access &= ~ACC_WRITE_MASK;
1168         if (pte_access & ACC_EXEC_MASK)
1169                 spte |= shadow_x_mask;
1170         else
1171                 spte |= shadow_nx_mask;
1172         if (pte_access & ACC_USER_MASK)
1173                 spte |= shadow_user_mask;
1174         if (largepage)
1175                 spte |= PT_PAGE_SIZE_MASK;
1176
1177         spte |= (u64)pfn << PAGE_SHIFT;
1178
1179         if ((pte_access & ACC_WRITE_MASK)
1180             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1181                 struct kvm_mmu_page *shadow;
1182
1183                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1184                         ret = 1;
1185                         spte = shadow_trap_nonpresent_pte;
1186                         goto set_pte;
1187                 }
1188
1189                 spte |= PT_WRITABLE_MASK;
1190
1191                 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1192                 if (shadow) {
1193                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1194                                  __func__, gfn);
1195                         ret = 1;
1196                         pte_access &= ~ACC_WRITE_MASK;
1197                         if (is_writeble_pte(spte))
1198                                 spte &= ~PT_WRITABLE_MASK;
1199                 }
1200         }
1201
1202         if (pte_access & ACC_WRITE_MASK)
1203                 mark_page_dirty(vcpu->kvm, gfn);
1204
1205 set_pte:
1206         set_shadow_pte(shadow_pte, spte);
1207         return ret;
1208 }
1209
1210
1211 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1212                          unsigned pt_access, unsigned pte_access,
1213                          int user_fault, int write_fault, int dirty,
1214                          int *ptwrite, int largepage, gfn_t gfn,
1215                          pfn_t pfn, bool speculative)
1216 {
1217         int was_rmapped = 0;
1218         int was_writeble = is_writeble_pte(*shadow_pte);
1219
1220         pgprintk("%s: spte %llx access %x write_fault %d"
1221                  " user_fault %d gfn %lx\n",
1222                  __func__, *shadow_pte, pt_access,
1223                  write_fault, user_fault, gfn);
1224
1225         if (is_rmap_pte(*shadow_pte)) {
1226                 /*
1227                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1228                  * the parent of the now unreachable PTE.
1229                  */
1230                 if (largepage && !is_large_pte(*shadow_pte)) {
1231                         struct kvm_mmu_page *child;
1232                         u64 pte = *shadow_pte;
1233
1234                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1235                         mmu_page_remove_parent_pte(child, shadow_pte);
1236                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1237                         pgprintk("hfn old %lx new %lx\n",
1238                                  spte_to_pfn(*shadow_pte), pfn);
1239                         rmap_remove(vcpu->kvm, shadow_pte);
1240                 } else {
1241                         if (largepage)
1242                                 was_rmapped = is_large_pte(*shadow_pte);
1243                         else
1244                                 was_rmapped = 1;
1245                 }
1246         }
1247         if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
1248                       dirty, largepage, gfn, pfn, speculative)) {
1249                 if (write_fault)
1250                         *ptwrite = 1;
1251                 kvm_x86_ops->tlb_flush(vcpu);
1252         }
1253
1254         pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
1255         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1256                  is_large_pte(*shadow_pte)? "2MB" : "4kB",
1257                  is_present_pte(*shadow_pte)?"RW":"R", gfn,
1258                  *shadow_pte, shadow_pte);
1259         if (!was_rmapped && is_large_pte(*shadow_pte))
1260                 ++vcpu->kvm->stat.lpages;
1261
1262         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1263         if (!was_rmapped) {
1264                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1265                 if (!is_rmap_pte(*shadow_pte))
1266                         kvm_release_pfn_clean(pfn);
1267         } else {
1268                 if (was_writeble)
1269                         kvm_release_pfn_dirty(pfn);
1270                 else
1271                         kvm_release_pfn_clean(pfn);
1272         }
1273         if (speculative) {
1274                 vcpu->arch.last_pte_updated = shadow_pte;
1275                 vcpu->arch.last_pte_gfn = gfn;
1276         }
1277 }
1278
1279 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1280 {
1281 }
1282
1283 struct direct_shadow_walk {
1284         struct kvm_shadow_walk walker;
1285         pfn_t pfn;
1286         int write;
1287         int largepage;
1288         int pt_write;
1289 };
1290
1291 static int direct_map_entry(struct kvm_shadow_walk *_walk,
1292                             struct kvm_vcpu *vcpu,
1293                             u64 addr, u64 *sptep, int level)
1294 {
1295         struct direct_shadow_walk *walk =
1296                 container_of(_walk, struct direct_shadow_walk, walker);
1297         struct kvm_mmu_page *sp;
1298         gfn_t pseudo_gfn;
1299         gfn_t gfn = addr >> PAGE_SHIFT;
1300
1301         if (level == PT_PAGE_TABLE_LEVEL
1302             || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
1303                 mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
1304                              0, walk->write, 1, &walk->pt_write,
1305                              walk->largepage, gfn, walk->pfn, false);
1306                 ++vcpu->stat.pf_fixed;
1307                 return 1;
1308         }
1309
1310         if (*sptep == shadow_trap_nonpresent_pte) {
1311                 pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1312                 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
1313                                       1, ACC_ALL, sptep);
1314                 if (!sp) {
1315                         pgprintk("nonpaging_map: ENOMEM\n");
1316                         kvm_release_pfn_clean(walk->pfn);
1317                         return -ENOMEM;
1318                 }
1319
1320                 set_shadow_pte(sptep,
1321                                __pa(sp->spt)
1322                                | PT_PRESENT_MASK | PT_WRITABLE_MASK
1323                                | shadow_user_mask | shadow_x_mask);
1324         }
1325         return 0;
1326 }
1327
1328 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1329                         int largepage, gfn_t gfn, pfn_t pfn)
1330 {
1331         int r;
1332         struct direct_shadow_walk walker = {
1333                 .walker = { .entry = direct_map_entry, },
1334                 .pfn = pfn,
1335                 .largepage = largepage,
1336                 .write = write,
1337                 .pt_write = 0,
1338         };
1339
1340         r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
1341         if (r < 0)
1342                 return r;
1343         return walker.pt_write;
1344 }
1345
1346 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1347 {
1348         int r;
1349         int largepage = 0;
1350         pfn_t pfn;
1351         unsigned long mmu_seq;
1352
1353         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1354                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1355                 largepage = 1;
1356         }
1357
1358         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1359         smp_rmb();
1360         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1361
1362         /* mmio */
1363         if (is_error_pfn(pfn)) {
1364                 kvm_release_pfn_clean(pfn);
1365                 return 1;
1366         }
1367
1368         spin_lock(&vcpu->kvm->mmu_lock);
1369         if (mmu_notifier_retry(vcpu, mmu_seq))
1370                 goto out_unlock;
1371         kvm_mmu_free_some_pages(vcpu);
1372         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1373         spin_unlock(&vcpu->kvm->mmu_lock);
1374
1375
1376         return r;
1377
1378 out_unlock:
1379         spin_unlock(&vcpu->kvm->mmu_lock);
1380         kvm_release_pfn_clean(pfn);
1381         return 0;
1382 }
1383
1384
1385 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1386 {
1387         int i;
1388         struct kvm_mmu_page *sp;
1389
1390         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1391                 return;
1392         spin_lock(&vcpu->kvm->mmu_lock);
1393         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1394                 hpa_t root = vcpu->arch.mmu.root_hpa;
1395
1396                 sp = page_header(root);
1397                 --sp->root_count;
1398                 if (!sp->root_count && sp->role.invalid)
1399                         kvm_mmu_zap_page(vcpu->kvm, sp);
1400                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1401                 spin_unlock(&vcpu->kvm->mmu_lock);
1402                 return;
1403         }
1404         for (i = 0; i < 4; ++i) {
1405                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1406
1407                 if (root) {
1408                         root &= PT64_BASE_ADDR_MASK;
1409                         sp = page_header(root);
1410                         --sp->root_count;
1411                         if (!sp->root_count && sp->role.invalid)
1412                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1413                 }
1414                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1415         }
1416         spin_unlock(&vcpu->kvm->mmu_lock);
1417         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1418 }
1419
1420 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1421 {
1422         int i;
1423         gfn_t root_gfn;
1424         struct kvm_mmu_page *sp;
1425         int metaphysical = 0;
1426
1427         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1428
1429         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1430                 hpa_t root = vcpu->arch.mmu.root_hpa;
1431
1432                 ASSERT(!VALID_PAGE(root));
1433                 if (tdp_enabled)
1434                         metaphysical = 1;
1435                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1436                                       PT64_ROOT_LEVEL, metaphysical,
1437                                       ACC_ALL, NULL);
1438                 root = __pa(sp->spt);
1439                 ++sp->root_count;
1440                 vcpu->arch.mmu.root_hpa = root;
1441                 return;
1442         }
1443         metaphysical = !is_paging(vcpu);
1444         if (tdp_enabled)
1445                 metaphysical = 1;
1446         for (i = 0; i < 4; ++i) {
1447                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1448
1449                 ASSERT(!VALID_PAGE(root));
1450                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1451                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1452                                 vcpu->arch.mmu.pae_root[i] = 0;
1453                                 continue;
1454                         }
1455                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1456                 } else if (vcpu->arch.mmu.root_level == 0)
1457                         root_gfn = 0;
1458                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1459                                       PT32_ROOT_LEVEL, metaphysical,
1460                                       ACC_ALL, NULL);
1461                 root = __pa(sp->spt);
1462                 ++sp->root_count;
1463                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1464         }
1465         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1466 }
1467
1468 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1469 {
1470         return vaddr;
1471 }
1472
1473 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1474                                 u32 error_code)
1475 {
1476         gfn_t gfn;
1477         int r;
1478
1479         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
1480         r = mmu_topup_memory_caches(vcpu);
1481         if (r)
1482                 return r;
1483
1484         ASSERT(vcpu);
1485         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1486
1487         gfn = gva >> PAGE_SHIFT;
1488
1489         return nonpaging_map(vcpu, gva & PAGE_MASK,
1490                              error_code & PFERR_WRITE_MASK, gfn);
1491 }
1492
1493 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1494                                 u32 error_code)
1495 {
1496         pfn_t pfn;
1497         int r;
1498         int largepage = 0;
1499         gfn_t gfn = gpa >> PAGE_SHIFT;
1500         unsigned long mmu_seq;
1501
1502         ASSERT(vcpu);
1503         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1504
1505         r = mmu_topup_memory_caches(vcpu);
1506         if (r)
1507                 return r;
1508
1509         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1510                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1511                 largepage = 1;
1512         }
1513         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1514         smp_rmb();
1515         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1516         if (is_error_pfn(pfn)) {
1517                 kvm_release_pfn_clean(pfn);
1518                 return 1;
1519         }
1520         spin_lock(&vcpu->kvm->mmu_lock);
1521         if (mmu_notifier_retry(vcpu, mmu_seq))
1522                 goto out_unlock;
1523         kvm_mmu_free_some_pages(vcpu);
1524         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1525                          largepage, gfn, pfn);
1526         spin_unlock(&vcpu->kvm->mmu_lock);
1527
1528         return r;
1529
1530 out_unlock:
1531         spin_unlock(&vcpu->kvm->mmu_lock);
1532         kvm_release_pfn_clean(pfn);
1533         return 0;
1534 }
1535
1536 static void nonpaging_free(struct kvm_vcpu *vcpu)
1537 {
1538         mmu_free_roots(vcpu);
1539 }
1540
1541 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1542 {
1543         struct kvm_mmu *context = &vcpu->arch.mmu;
1544
1545         context->new_cr3 = nonpaging_new_cr3;
1546         context->page_fault = nonpaging_page_fault;
1547         context->gva_to_gpa = nonpaging_gva_to_gpa;
1548         context->free = nonpaging_free;
1549         context->prefetch_page = nonpaging_prefetch_page;
1550         context->root_level = 0;
1551         context->shadow_root_level = PT32E_ROOT_LEVEL;
1552         context->root_hpa = INVALID_PAGE;
1553         return 0;
1554 }
1555
1556 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1557 {
1558         ++vcpu->stat.tlb_flush;
1559         kvm_x86_ops->tlb_flush(vcpu);
1560 }
1561
1562 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1563 {
1564         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
1565         mmu_free_roots(vcpu);
1566 }
1567
1568 static void inject_page_fault(struct kvm_vcpu *vcpu,
1569                               u64 addr,
1570                               u32 err_code)
1571 {
1572         kvm_inject_page_fault(vcpu, addr, err_code);
1573 }
1574
1575 static void paging_free(struct kvm_vcpu *vcpu)
1576 {
1577         nonpaging_free(vcpu);
1578 }
1579
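/*
 * paging_tmpl.h is included twice to instantiate the guest page-table
 * walker and shadow fault handlers for both guest pte formats: once with
 * PTTYPE == 64 (64-bit/PAE guests, paging64_*) and once with PTTYPE == 32
 * (legacy 32-bit guests, paging32_*).
 */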
1580 #define PTTYPE 64
1581 #include "paging_tmpl.h"
1582 #undef PTTYPE
1583
1584 #define PTTYPE 32
1585 #include "paging_tmpl.h"
1586 #undef PTTYPE
1587
1588 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1589 {
1590         struct kvm_mmu *context = &vcpu->arch.mmu;
1591
1592         ASSERT(is_pae(vcpu));
1593         context->new_cr3 = paging_new_cr3;
1594         context->page_fault = paging64_page_fault;
1595         context->gva_to_gpa = paging64_gva_to_gpa;
1596         context->prefetch_page = paging64_prefetch_page;
1597         context->free = paging_free;
1598         context->root_level = level;
1599         context->shadow_root_level = level;
1600         context->root_hpa = INVALID_PAGE;
1601         return 0;
1602 }
1603
1604 static int paging64_init_context(struct kvm_vcpu *vcpu)
1605 {
1606         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1607 }
1608
1609 static int paging32_init_context(struct kvm_vcpu *vcpu)
1610 {
1611         struct kvm_mmu *context = &vcpu->arch.mmu;
1612
1613         context->new_cr3 = paging_new_cr3;
1614         context->page_fault = paging32_page_fault;
1615         context->gva_to_gpa = paging32_gva_to_gpa;
1616         context->free = paging_free;
1617         context->prefetch_page = paging32_prefetch_page;
1618         context->root_level = PT32_ROOT_LEVEL;
1619         context->shadow_root_level = PT32E_ROOT_LEVEL;
1620         context->root_hpa = INVALID_PAGE;
1621         return 0;
1622 }
1623
1624 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1625 {
1626         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1627 }
1628
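/*
 * With TDP enabled the shadow root level is dictated by the hardware
 * (kvm_x86_ops->get_tdp_level()) and faults are handled by
 * tdp_page_fault().  gva_to_gpa still has to follow the guest's own
 * paging mode, since emulated accesses must be translated through the
 * guest page tables.
 */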
1629 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1630 {
1631         struct kvm_mmu *context = &vcpu->arch.mmu;
1632
1633         context->new_cr3 = nonpaging_new_cr3;
1634         context->page_fault = tdp_page_fault;
1635         context->free = nonpaging_free;
1636         context->prefetch_page = nonpaging_prefetch_page;
1637         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
1638         context->root_hpa = INVALID_PAGE;
1639
1640         if (!is_paging(vcpu)) {
1641                 context->gva_to_gpa = nonpaging_gva_to_gpa;
1642                 context->root_level = 0;
1643         } else if (is_long_mode(vcpu)) {
1644                 context->gva_to_gpa = paging64_gva_to_gpa;
1645                 context->root_level = PT64_ROOT_LEVEL;
1646         } else if (is_pae(vcpu)) {
1647                 context->gva_to_gpa = paging64_gva_to_gpa;
1648                 context->root_level = PT32E_ROOT_LEVEL;
1649         } else {
1650                 context->gva_to_gpa = paging32_gva_to_gpa;
1651                 context->root_level = PT32_ROOT_LEVEL;
1652         }
1653
1654         return 0;
1655 }
1656
1657 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1658 {
1659         ASSERT(vcpu);
1660         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1661
1662         if (!is_paging(vcpu))
1663                 return nonpaging_init_context(vcpu);
1664         else if (is_long_mode(vcpu))
1665                 return paging64_init_context(vcpu);
1666         else if (is_pae(vcpu))
1667                 return paging32E_init_context(vcpu);
1668         else
1669                 return paging32_init_context(vcpu);
1670 }
1671
1672 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1673 {
1674         vcpu->arch.update_pte.pfn = bad_pfn;
1675
1676         if (tdp_enabled)
1677                 return init_kvm_tdp_mmu(vcpu);
1678         else
1679                 return init_kvm_softmmu(vcpu);
1680 }
1681
1682 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1683 {
1684         ASSERT(vcpu);
1685         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1686                 vcpu->arch.mmu.free(vcpu);
1687                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1688         }
1689 }
1690
1691 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1692 {
1693         destroy_kvm_mmu(vcpu);
1694         return init_kvm_mmu(vcpu);
1695 }
1696 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1697
1698 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1699 {
1700         int r;
1701
1702         r = mmu_topup_memory_caches(vcpu);
1703         if (r)
1704                 goto out;
1705         spin_lock(&vcpu->kvm->mmu_lock);
1706         kvm_mmu_free_some_pages(vcpu);
1707         mmu_alloc_roots(vcpu);
1708         spin_unlock(&vcpu->kvm->mmu_lock);
1709         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
1710         kvm_mmu_flush_tlb(vcpu);
1711 out:
1712         return r;
1713 }
1714 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1715
1716 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1717 {
1718         mmu_free_roots(vcpu);
1719 }
1720
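/*
 * Clear a shadow pte that is about to be overwritten by a guest pte
 * write: leaf and large ptes are dropped from the rmap, while non-leaf
 * ptes only remove the parent link of the child shadow page.
 */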
1721 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1722                                   struct kvm_mmu_page *sp,
1723                                   u64 *spte)
1724 {
1725         u64 pte;
1726         struct kvm_mmu_page *child;
1727
1728         pte = *spte;
1729         if (is_shadow_present_pte(pte)) {
1730                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1731                     is_large_pte(pte))
1732                         rmap_remove(vcpu->kvm, spte);
1733                 else {
1734                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1735                         mmu_page_remove_parent_pte(child, spte);
1736                 }
1737         }
1738         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1739         if (is_large_pte(pte))
1740                 --vcpu->kvm->stat.lpages;
1741 }
1742
1743 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1744                                   struct kvm_mmu_page *sp,
1745                                   u64 *spte,
1746                                   const void *new)
1747 {
1748         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1749                 if (!vcpu->arch.update_pte.largepage ||
1750                     sp->role.glevels == PT32_ROOT_LEVEL) {
1751                         ++vcpu->kvm->stat.mmu_pde_zapped;
1752                         return;
1753                 }
1754         }
1755
1756         ++vcpu->kvm->stat.mmu_pte_updated;
1757         if (sp->role.glevels == PT32_ROOT_LEVEL)
1758                 paging32_update_pte(vcpu, sp, spte, new);
1759         else
1760                 paging64_update_pte(vcpu, sp, spte, new);
1761 }
1762
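/*
 * A remote TLB flush is only needed if the old spte was present and the
 * new one drops the mapping, points at a different frame, or removes
 * permissions (NX is inverted so that setting NX counts as a permission
 * loss); otherwise a local flush is enough.
 */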
1763 static bool need_remote_flush(u64 old, u64 new)
1764 {
1765         if (!is_shadow_present_pte(old))
1766                 return false;
1767         if (!is_shadow_present_pte(new))
1768                 return true;
1769         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1770                 return true;
1771         old ^= PT64_NX_MASK;
1772         new ^= PT64_NX_MASK;
1773         return (old & ~new & PT64_PERM_MASK) != 0;
1774 }
1775
1776 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1777 {
1778         if (need_remote_flush(old, new))
1779                 kvm_flush_remote_tlbs(vcpu->kvm);
1780         else
1781                 kvm_mmu_flush_tlb(vcpu);
1782 }
1783
1784 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1785 {
1786         u64 *spte = vcpu->arch.last_pte_updated;
1787
1788         return !!(spte && (*spte & shadow_accessed_mask));
1789 }
1790
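/*
 * Guess that the data being written is a guest pte and resolve the frame
 * it points at before mmu_lock is taken, so that mmu_pte_write_new_pte()
 * can map it immediately.  A 32-bit write to half of a PAE pte is
 * completed by reading the other half from guest memory.  The pinned pfn
 * is stashed in vcpu->arch.update_pte and released at the end of
 * kvm_mmu_pte_write().
 */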
1791 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1792                                           const u8 *new, int bytes)
1793 {
1794         gfn_t gfn;
1795         int r;
1796         u64 gpte = 0;
1797         pfn_t pfn;
1798
1799         vcpu->arch.update_pte.largepage = 0;
1800
1801         if (bytes != 4 && bytes != 8)
1802                 return;
1803
1804         /*
1805          * Assume that the pte being written is on a page table of the
1806          * same type as the current vcpu's paging mode.  This is nearly
1807          * always true (it might be false while the guest is changing
1808          * modes).  Note that this is verified later by update_pte().
1809          */
1810         if (is_pae(vcpu)) {
1811                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1812                 if ((bytes == 4) && (gpa % 4 == 0)) {
1813                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1814                         if (r)
1815                                 return;
1816                         memcpy((void *)&gpte + (gpa % 8), new, 4);
1817                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1818                         memcpy((void *)&gpte, new, 8);
1819                 }
1820         } else {
1821                 if ((bytes == 4) && (gpa % 4 == 0))
1822                         memcpy((void *)&gpte, new, 4);
1823         }
1824         if (!is_present_pte(gpte))
1825                 return;
1826         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1827
1828         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1829                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1830                 vcpu->arch.update_pte.largepage = 1;
1831         }
1832         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
1833         smp_rmb();
1834         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1835
1836         if (is_error_pfn(pfn)) {
1837                 kvm_release_pfn_clean(pfn);
1838                 return;
1839         }
1840         vcpu->arch.update_pte.gfn = gfn;
1841         vcpu->arch.update_pte.pfn = pfn;
1842 }
1843
1844 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1845 {
1846         u64 *spte = vcpu->arch.last_pte_updated;
1847
1848         if (spte
1849             && vcpu->arch.last_pte_gfn == gfn
1850             && shadow_accessed_mask
1851             && !(*spte & shadow_accessed_mask)
1852             && is_shadow_present_pte(*spte))
1853                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
1854 }
1855
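/*
 * Called when the guest writes to a page that is shadowed as a page
 * table (such writes trap because the page is write-protected).  The
 * affected shadow ptes are updated in place where possible; misaligned
 * writes and pages that see repeated writes ("flooded") are unshadowed
 * instead.
 */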
1856 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1857                        const u8 *new, int bytes)
1858 {
1859         gfn_t gfn = gpa >> PAGE_SHIFT;
1860         struct kvm_mmu_page *sp;
1861         struct hlist_node *node, *n;
1862         struct hlist_head *bucket;
1863         unsigned index;
1864         u64 entry, gentry;
1865         u64 *spte;
1866         unsigned offset = offset_in_page(gpa);
1867         unsigned pte_size;
1868         unsigned page_offset;
1869         unsigned misaligned;
1870         unsigned quadrant;
1871         int level;
1872         int flooded = 0;
1873         int npte;
1874         int r;
1875
1876         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
1877         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1878         spin_lock(&vcpu->kvm->mmu_lock);
1879         kvm_mmu_access_page(vcpu, gfn);
1880         kvm_mmu_free_some_pages(vcpu);
1881         ++vcpu->kvm->stat.mmu_pte_write;
1882         kvm_mmu_audit(vcpu, "pre pte write");
1883         if (gfn == vcpu->arch.last_pt_write_gfn
1884             && !last_updated_pte_accessed(vcpu)) {
1885                 ++vcpu->arch.last_pt_write_count;
1886                 if (vcpu->arch.last_pt_write_count >= 3)
1887                         flooded = 1;
1888         } else {
1889                 vcpu->arch.last_pt_write_gfn = gfn;
1890                 vcpu->arch.last_pt_write_count = 1;
1891                 vcpu->arch.last_pte_updated = NULL;
1892         }
1893         index = kvm_page_table_hashfn(gfn);
1894         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1895         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1896                 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
1897                         continue;
1898                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1899                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1900                 misaligned |= bytes < 4;
1901                 if (misaligned || flooded) {
1902                         /*
1903                          * Misaligned accesses are too much trouble to fix
1904                          * up; also, they usually indicate a page is not used
1905                          * as a page table.
1906                          *
1907                          * If we're seeing too many writes to a page,
1908                          * it may no longer be a page table, or we may be
1909                          * forking, in which case it is better to unmap the
1910                          * page.
1911                          */
1912                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1913                                  gpa, bytes, sp->role.word);
1914                         kvm_mmu_zap_page(vcpu->kvm, sp);
1915                         ++vcpu->kvm->stat.mmu_flooded;
1916                         continue;
1917                 }
1918                 page_offset = offset;
1919                 level = sp->role.level;
1920                 npte = 1;
1921                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1922                         page_offset <<= 1;      /* 32->64 */
1923                         /*
1924                          * A 32-bit pde maps 4MB while the shadow pdes map
1925                          * only 2MB.  So we need to double the offset again
1926                          * and zap two pdes instead of one.
1927                          */
1928                         if (level == PT32_ROOT_LEVEL) {
1929                                 page_offset &= ~7; /* kill rounding error */
1930                                 page_offset <<= 1;
1931                                 npte = 2;
1932                         }
1933                         quadrant = page_offset >> PAGE_SHIFT;
1934                         page_offset &= ~PAGE_MASK;
1935                         if (quadrant != sp->role.quadrant)
1936                                 continue;
1937                 }
1938                 spte = &sp->spt[page_offset / sizeof(*spte)];
1939                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1940                         gentry = 0;
1941                         r = kvm_read_guest_atomic(vcpu->kvm,
1942                                                   gpa & ~(u64)(pte_size - 1),
1943                                                   &gentry, pte_size);
1944                         new = (const void *)&gentry;
1945                         if (r < 0)
1946                                 new = NULL;
1947                 }
1948                 while (npte--) {
1949                         entry = *spte;
1950                         mmu_pte_write_zap_pte(vcpu, sp, spte);
1951                         if (new)
1952                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
1953                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1954                         ++spte;
1955                 }
1956         }
1957         kvm_mmu_audit(vcpu, "post pte write");
1958         spin_unlock(&vcpu->kvm->mmu_lock);
1959         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1960                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1961                 vcpu->arch.update_pte.pfn = bad_pfn;
1962         }
1963 }
1964
1965 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1966 {
1967         gpa_t gpa;
1968         int r;
1969
1970         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1971
1972         spin_lock(&vcpu->kvm->mmu_lock);
1973         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1974         spin_unlock(&vcpu->kvm->mmu_lock);
1975         return r;
1976 }
1977 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
1978
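/*
 * Recycle the oldest shadow pages (taken from the tail of
 * active_mmu_pages) until at least KVM_REFILL_PAGES pages are free.
 */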
1979 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1980 {
1981         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
1982                 struct kvm_mmu_page *sp;
1983
1984                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
1985                                   struct kvm_mmu_page, link);
1986                 kvm_mmu_zap_page(vcpu->kvm, sp);
1987                 ++vcpu->kvm->stat.mmu_recycled;
1988         }
1989 }
1990
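/*
 * Top-level page fault entry point.  The mode-specific handler returns a
 * negative value on error, 0 when it fixed the fault (resume the guest),
 * and a positive value when the faulting instruction has to be emulated,
 * e.g. a write to a write-protected guest page table or to mmio.
 */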
1991 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1992 {
1993         int r;
1994         enum emulation_result er;
1995
1996         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
1997         if (r < 0)
1998                 goto out;
1999
2000         if (!r) {
2001                 r = 1;
2002                 goto out;
2003         }
2004
2005         r = mmu_topup_memory_caches(vcpu);
2006         if (r)
2007                 goto out;
2008
2009         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2010
2011         switch (er) {
2012         case EMULATE_DONE:
2013                 return 1;
2014         case EMULATE_DO_MMIO:
2015                 ++vcpu->stat.mmio_exits;
2016                 return 0;
2017         case EMULATE_FAIL:
2018                 kvm_report_emulation_failure(vcpu, "pagetable");
2019                 return 1;
2020         default:
2021                 BUG();
2022         }
2023 out:
2024         return r;
2025 }
2026 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2027
2028 void kvm_enable_tdp(void)
2029 {
2030         tdp_enabled = true;
2031 }
2032 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2033
2034 void kvm_disable_tdp(void)
2035 {
2036         tdp_enabled = false;
2037 }
2038 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2039
2040 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2041 {
2042         struct kvm_mmu_page *sp;
2043
2044         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2045                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
2046                                   struct kvm_mmu_page, link);
2047                 kvm_mmu_zap_page(vcpu->kvm, sp);
2048                 cond_resched();
2049         }
2050         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2051 }
2052
2053 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2054 {
2055         struct page *page;
2056         int i;
2057
2058         ASSERT(vcpu);
2059
2060         if (vcpu->kvm->arch.n_requested_mmu_pages)
2061                 vcpu->kvm->arch.n_free_mmu_pages =
2062                                         vcpu->kvm->arch.n_requested_mmu_pages;
2063         else
2064                 vcpu->kvm->arch.n_free_mmu_pages =
2065                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2066         /*
2067          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2068          * Therefore we need to allocate shadow page tables in the first
2069          * 4GB of memory, which happens to fit the DMA32 zone.
2070          */
2071         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2072         if (!page)
2073                 goto error_1;
2074         vcpu->arch.mmu.pae_root = page_address(page);
2075         for (i = 0; i < 4; ++i)
2076                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2077
2078         return 0;
2079
2080 error_1:
2081         free_mmu_pages(vcpu);
2082         return -ENOMEM;
2083 }
2084
2085 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2086 {
2087         ASSERT(vcpu);
2088         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2089
2090         return alloc_mmu_pages(vcpu);
2091 }
2092
2093 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2094 {
2095         ASSERT(vcpu);
2096         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2097
2098         return init_kvm_mmu(vcpu);
2099 }
2100
2101 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2102 {
2103         ASSERT(vcpu);
2104
2105         destroy_kvm_mmu(vcpu);
2106         free_mmu_pages(vcpu);
2107         mmu_free_memory_caches(vcpu);
2108 }
2109
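/*
 * Strip the writable bit from every shadow pte that maps the given
 * memory slot (typically when dirty-page logging is turned on for the
 * slot), then flush the TLBs of all vcpus.
 */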
2110 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2111 {
2112         struct kvm_mmu_page *sp;
2113
2114         spin_lock(&kvm->mmu_lock);
2115         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2116                 int i;
2117                 u64 *pt;
2118
2119                 if (!test_bit(slot, &sp->slot_bitmap))
2120                         continue;
2121
2122                 pt = sp->spt;
2123                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2124                         /* avoid RMW */
2125                         if (pt[i] & PT_WRITABLE_MASK)
2126                                 pt[i] &= ~PT_WRITABLE_MASK;
2127         }
2128         kvm_flush_remote_tlbs(kvm);
2129         spin_unlock(&kvm->mmu_lock);
2130 }
2131
2132 void kvm_mmu_zap_all(struct kvm *kvm)
2133 {
2134         struct kvm_mmu_page *sp, *node;
2135
2136         spin_lock(&kvm->mmu_lock);
2137         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2138                 kvm_mmu_zap_page(kvm, sp);
2139         spin_unlock(&kvm->mmu_lock);
2140
2141         kvm_flush_remote_tlbs(kvm);
2142 }
2143
2144 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2145 {
2146         struct kvm_mmu_page *page;
2147
2148         page = container_of(kvm->arch.active_mmu_pages.prev,
2149                             struct kvm_mmu_page, link);
2150         kvm_mmu_zap_page(kvm, page);
2151 }
2152
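/*
 * Shrinker callback invoked under memory pressure: zap one shadow page
 * from the first VM that has any, move that VM to the tail of vm_list so
 * the cost is spread round-robin, and report the total number of shadow
 * pages in use as the cache size.
 */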
2153 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2154 {
2155         struct kvm *kvm;
2156         struct kvm *kvm_freed = NULL;
2157         int cache_count = 0;
2158
2159         spin_lock(&kvm_lock);
2160
2161         list_for_each_entry(kvm, &vm_list, vm_list) {
2162                 int npages;
2163
2164                 if (!down_read_trylock(&kvm->slots_lock))
2165                         continue;
2166                 spin_lock(&kvm->mmu_lock);
2167                 npages = kvm->arch.n_alloc_mmu_pages -
2168                          kvm->arch.n_free_mmu_pages;
2169                 cache_count += npages;
2170                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2171                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2172                         cache_count--;
2173                         kvm_freed = kvm;
2174                 }
2175                 nr_to_scan--;
2176
2177                 spin_unlock(&kvm->mmu_lock);
2178                 up_read(&kvm->slots_lock);
2179         }
2180         if (kvm_freed)
2181                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2182
2183         spin_unlock(&kvm_lock);
2184
2185         return cache_count;
2186 }
2187
2188 static struct shrinker mmu_shrinker = {
2189         .shrink = mmu_shrink,
2190         .seeks = DEFAULT_SEEKS * 10,
2191 };
2192
2193 static void mmu_destroy_caches(void)
2194 {
2195         if (pte_chain_cache)
2196                 kmem_cache_destroy(pte_chain_cache);
2197         if (rmap_desc_cache)
2198                 kmem_cache_destroy(rmap_desc_cache);
2199         if (mmu_page_header_cache)
2200                 kmem_cache_destroy(mmu_page_header_cache);
2201 }
2202
2203 void kvm_mmu_module_exit(void)
2204 {
2205         mmu_destroy_caches();
2206         unregister_shrinker(&mmu_shrinker);
2207 }
2208
2209 int kvm_mmu_module_init(void)
2210 {
2211         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2212                                             sizeof(struct kvm_pte_chain),
2213                                             0, 0, NULL);
2214         if (!pte_chain_cache)
2215                 goto nomem;
2216         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2217                                             sizeof(struct kvm_rmap_desc),
2218                                             0, 0, NULL);
2219         if (!rmap_desc_cache)
2220                 goto nomem;
2221
2222         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2223                                                   sizeof(struct kvm_mmu_page),
2224                                                   0, 0, NULL);
2225         if (!mmu_page_header_cache)
2226                 goto nomem;
2227
2228         register_shrinker(&mmu_shrinker);
2229
2230         return 0;
2231
2232 nomem:
2233         mmu_destroy_caches();
2234         return -ENOMEM;
2235 }
2236
2237 /*
2238  * Calculate the number of mmu pages needed for kvm.
2239  */
2240 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2241 {
2242         int i;
2243         unsigned int nr_mmu_pages;
2244         unsigned int nr_pages = 0;
2245
2246         for (i = 0; i < kvm->nmemslots; i++)
2247                 nr_pages += kvm->memslots[i].npages;
2248
2249         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2250         nr_mmu_pages = max(nr_mmu_pages,
2251                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2252
2253         return nr_mmu_pages;
2254 }
2255
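/*
 * Paravirtualized mmu batching: the guest queues packed mmu operations
 * (write pte, flush tlb, release pt) in a buffer; kvm_pv_mmu_op() copies
 * the buffer in and applies the operations one by one, returning the
 * number of bytes consumed through *ret.
 */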
2256 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2257                                 unsigned len)
2258 {
2259         if (len > buffer->len)
2260                 return NULL;
2261         return buffer->ptr;
2262 }
2263
2264 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2265                                 unsigned len)
2266 {
2267         void *ret;
2268
2269         ret = pv_mmu_peek_buffer(buffer, len);
2270         if (!ret)
2271                 return ret;
2272         buffer->ptr += len;
2273         buffer->len -= len;
2274         buffer->processed += len;
2275         return ret;
2276 }
2277
2278 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2279                              gpa_t addr, gpa_t value)
2280 {
2281         int bytes = 8;
2282         int r;
2283
2284         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2285                 bytes = 4;
2286
2287         r = mmu_topup_memory_caches(vcpu);
2288         if (r)
2289                 return r;
2290
2291         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2292                 return -EFAULT;
2293
2294         return 1;
2295 }
2296
2297 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2298 {
2299         kvm_x86_ops->tlb_flush(vcpu);
2300         return 1;
2301 }
2302
2303 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2304 {
2305         spin_lock(&vcpu->kvm->mmu_lock);
2306         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2307         spin_unlock(&vcpu->kvm->mmu_lock);
2308         return 1;
2309 }
2310
2311 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2312                              struct kvm_pv_mmu_op_buffer *buffer)
2313 {
2314         struct kvm_mmu_op_header *header;
2315
2316         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2317         if (!header)
2318                 return 0;
2319         switch (header->op) {
2320         case KVM_MMU_OP_WRITE_PTE: {
2321                 struct kvm_mmu_op_write_pte *wpte;
2322
2323                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2324                 if (!wpte)
2325                         return 0;
2326                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2327                                         wpte->pte_val);
2328         }
2329         case KVM_MMU_OP_FLUSH_TLB: {
2330                 struct kvm_mmu_op_flush_tlb *ftlb;
2331
2332                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2333                 if (!ftlb)
2334                         return 0;
2335                 return kvm_pv_mmu_flush_tlb(vcpu);
2336         }
2337         case KVM_MMU_OP_RELEASE_PT: {
2338                 struct kvm_mmu_op_release_pt *rpt;
2339
2340                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2341                 if (!rpt)
2342                         return 0;
2343                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2344         }
2345         default: return 0;
2346         }
2347 }
2348
2349 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2350                   gpa_t addr, unsigned long *ret)
2351 {
2352         int r;
2353         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2354
2355         buffer->ptr = buffer->buf;
2356         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2357         buffer->processed = 0;
2358
2359         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2360         if (r)
2361                 goto out;
2362
2363         while (buffer->len) {
2364                 r = kvm_pv_mmu_op_one(vcpu, buffer);
2365                 if (r < 0)
2366                         goto out;
2367                 if (r == 0)
2368                         break;
2369         }
2370
2371         r = 1;
2372 out:
2373         *ret = buffer->processed;
2374         return r;
2375 }
2376
2377 #ifdef AUDIT
2378
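/*
 * When AUDIT is defined, kvm_mmu_audit() cross-checks the shadow page
 * tables against the guest translations and the reverse map around
 * interesting mmu operations, and complains to the kernel log on any
 * mismatch.
 */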
2379 static const char *audit_msg;
2380
2381 static gva_t canonicalize(gva_t gva)
2382 {
2383 #ifdef CONFIG_X86_64
2384         gva = (long long)(gva << 16) >> 16;
2385 #endif
2386         return gva;
2387 }
2388
2389 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2390                                 gva_t va, int level)
2391 {
2392         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2393         int i;
2394         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2395
2396         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2397                 u64 ent = pt[i];
2398
2399                 if (ent == shadow_trap_nonpresent_pte)
2400                         continue;
2401
2402                 va = canonicalize(va);
2403                 if (level > 1) {
2404                         if (ent == shadow_notrap_nonpresent_pte)
2405                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
2406                                        " in nonleaf level: levels %d gva %lx"
2407                                        " level %d pte %llx\n", audit_msg,
2408                                        vcpu->arch.mmu.root_level, va, level, ent);
2409
2410                         audit_mappings_page(vcpu, ent, va, level - 1);
2411                 } else {
2412                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
2413                         pfn_t pfn = gpa_to_pfn(vcpu, gpa);
2414                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
2415                         if (is_shadow_present_pte(ent)
2416                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
2417                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
2418                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
2419                                        audit_msg, vcpu->arch.mmu.root_level,
2420                                        va, gpa, hpa, ent,
2421                                        is_shadow_present_pte(ent));
2422                         else if (ent == shadow_notrap_nonpresent_pte
2423                                  && !is_error_hpa(hpa))
2424                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
2425                                        " valid guest gva %lx\n", audit_msg, va);
2426                         kvm_release_pfn_clean(pfn);
2427
2428                 }
2429         }
2430 }
2431
2432 static void audit_mappings(struct kvm_vcpu *vcpu)
2433 {
2434         unsigned i;
2435
2436         if (vcpu->arch.mmu.root_level == 4)
2437                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
2438         else
2439                 for (i = 0; i < 4; ++i)
2440                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
2441                                 audit_mappings_page(vcpu,
2442                                                     vcpu->arch.mmu.pae_root[i],
2443                                                     i << 30,
2444                                                     2);
2445 }
2446
2447 static int count_rmaps(struct kvm_vcpu *vcpu)
2448 {
2449         int nmaps = 0;
2450         int i, j, k;
2451
2452         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2453                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2454                 struct kvm_rmap_desc *d;
2455
2456                 for (j = 0; j < m->npages; ++j) {
2457                         unsigned long *rmapp = &m->rmap[j];
2458
2459                         if (!*rmapp)
2460                                 continue;
2461                         if (!(*rmapp & 1)) {
2462                                 ++nmaps;
2463                                 continue;
2464                         }
2465                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
2466                         while (d) {
2467                                 for (k = 0; k < RMAP_EXT; ++k)
2468                                         if (d->shadow_ptes[k])
2469                                                 ++nmaps;
2470                                         else
2471                                                 break;
2472                                 d = d->more;
2473                         }
2474                 }
2475         }
2476         return nmaps;
2477 }
2478
2479 static int count_writable_mappings(struct kvm_vcpu *vcpu)
2480 {
2481         int nmaps = 0;
2482         struct kvm_mmu_page *sp;
2483         int i;
2484
2485         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2486                 u64 *pt = sp->spt;
2487
2488                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
2489                         continue;
2490
2491                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2492                         u64 ent = pt[i];
2493
2494                         if (!(ent & PT_PRESENT_MASK))
2495                                 continue;
2496                         if (!(ent & PT_WRITABLE_MASK))
2497                                 continue;
2498                         ++nmaps;
2499                 }
2500         }
2501         return nmaps;
2502 }
2503
2504 static void audit_rmap(struct kvm_vcpu *vcpu)
2505 {
2506         int n_rmap = count_rmaps(vcpu);
2507         int n_actual = count_writable_mappings(vcpu);
2508
2509         if (n_rmap != n_actual)
2510                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2511                        __func__, audit_msg, n_rmap, n_actual);
2512 }
2513
2514 static void audit_write_protection(struct kvm_vcpu *vcpu)
2515 {
2516         struct kvm_mmu_page *sp;
2517         struct kvm_memory_slot *slot;
2518         unsigned long *rmapp;
2519         gfn_t gfn;
2520
2521         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2522                 if (sp->role.metaphysical)
2523                         continue;
2524
2525                 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2526                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
2527                 rmapp = &slot->rmap[gfn - slot->base_gfn];
2528                 if (*rmapp)
2529                         printk(KERN_ERR "%s: (%s) shadow page has writable"
2530                                " mappings: gfn %lx role %x\n",
2531                                __func__, audit_msg, sp->gfn,
2532                                sp->role.word);
2533         }
2534 }
2535
2536 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2537 {
2538         int olddbg = dbg;
2539
2540         dbg = 0;
2541         audit_msg = msg;
2542         audit_rmap(vcpu);
2543         audit_write_protection(vcpu);
2544         audit_mappings(vcpu);
2545         dbg = olddbg;
2546 }
2547
2548 #endif