arch/x86/kvm/mmu.c (linux-2.6-omap-h63xx.git): KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "mmu.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36
37 /*
38  * When this variable is set to true it enables Two-Dimensional-Paging,
39  * where the hardware walks 2 page tables:
40  * 1. the guest-virtual to guest-physical walk
41  * 2. while doing 1., the guest-physical to host-physical walk
42  * If the hardware supports that, we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 0;
70 module_param(dbg, bool, 0644);
71 #endif
72
73 #ifndef MMU_DEBUG
74 #define ASSERT(x) do { } while (0)
75 #else
76 #define ASSERT(x)                                                       \
77         if (!(x)) {                                                     \
78                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
79                        __FILE__, __LINE__, #x);                         \
80         }
81 #endif
82
83 #define PT_FIRST_AVAIL_BITS_SHIFT 9
84 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
85
86 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
87
88 #define PT64_LEVEL_BITS 9
89
90 #define PT64_LEVEL_SHIFT(level) \
91                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
92
93 #define PT64_LEVEL_MASK(level) \
94                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
95
96 #define PT64_INDEX(address, level)\
97         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
98
99
100 #define PT32_LEVEL_BITS 10
101
102 #define PT32_LEVEL_SHIFT(level) \
103                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
104
105 #define PT32_LEVEL_MASK(level) \
106                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
107
108 #define PT32_INDEX(address, level)\
109         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
110
111
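As a side note, here is a minimal standalone sketch (assuming PAGE_SHIFT == 12 and the 9 index bits per level used by PT64_LEVEL_BITS; all ex_* names are hypothetical, not kernel symbols) of how these macros slice a 64-bit address into per-level table indices:

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12            /* assumed 4 KiB base pages */
#define EX_LEVEL_BITS 9             /* mirrors PT64_LEVEL_BITS */

/* mirrors PT64_LEVEL_SHIFT()/PT64_INDEX(), for illustration only */
static unsigned ex_index(uint64_t addr, int level)
{
        unsigned shift = EX_PAGE_SHIFT + (level - 1) * EX_LEVEL_BITS;

        return (addr >> shift) & ((1u << EX_LEVEL_BITS) - 1);
}

int main(void)
{
        uint64_t addr = 0x7f1234567000ULL;   /* arbitrary example address */
        int level;

        for (level = 4; level >= 1; --level)
                printf("level %d index %u\n", level, ex_index(addr, level));
        return 0;
}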
112 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
113 #define PT64_DIR_BASE_ADDR_MASK \
114         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
115
116 #define PT32_BASE_ADDR_MASK PAGE_MASK
117 #define PT32_DIR_BASE_ADDR_MASK \
118         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
119
120 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
121                         | PT64_NX_MASK)
122
123 #define PFERR_PRESENT_MASK (1U << 0)
124 #define PFERR_WRITE_MASK (1U << 1)
125 #define PFERR_USER_MASK (1U << 2)
126 #define PFERR_FETCH_MASK (1U << 4)
127
128 #define PT_DIRECTORY_LEVEL 2
129 #define PT_PAGE_TABLE_LEVEL 1
130
131 #define RMAP_EXT 4
132
133 #define ACC_EXEC_MASK    1
134 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
135 #define ACC_USER_MASK    PT_USER_MASK
136 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
137
138 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
139
140 struct kvm_rmap_desc {
141         u64 *shadow_ptes[RMAP_EXT];
142         struct kvm_rmap_desc *more;
143 };
144
145 struct kvm_shadow_walk {
146         int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
147                      u64 addr, u64 *spte, int level);
148 };
149
150 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
151
152 static struct kmem_cache *pte_chain_cache;
153 static struct kmem_cache *rmap_desc_cache;
154 static struct kmem_cache *mmu_page_header_cache;
155
156 static u64 __read_mostly shadow_trap_nonpresent_pte;
157 static u64 __read_mostly shadow_notrap_nonpresent_pte;
158 static u64 __read_mostly shadow_base_present_pte;
159 static u64 __read_mostly shadow_nx_mask;
160 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
161 static u64 __read_mostly shadow_user_mask;
162 static u64 __read_mostly shadow_accessed_mask;
163 static u64 __read_mostly shadow_dirty_mask;
164
165 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
166 {
167         shadow_trap_nonpresent_pte = trap_pte;
168         shadow_notrap_nonpresent_pte = notrap_pte;
169 }
170 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
171
172 void kvm_mmu_set_base_ptes(u64 base_pte)
173 {
174         shadow_base_present_pte = base_pte;
175 }
176 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
177
178 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
179                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
180 {
181         shadow_user_mask = user_mask;
182         shadow_accessed_mask = accessed_mask;
183         shadow_dirty_mask = dirty_mask;
184         shadow_nx_mask = nx_mask;
185         shadow_x_mask = x_mask;
186 }
187 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
188
189 static int is_write_protection(struct kvm_vcpu *vcpu)
190 {
191         return vcpu->arch.cr0 & X86_CR0_WP;
192 }
193
194 static int is_cpuid_PSE36(void)
195 {
196         return 1;
197 }
198
199 static int is_nx(struct kvm_vcpu *vcpu)
200 {
201         return vcpu->arch.shadow_efer & EFER_NX;
202 }
203
204 static int is_present_pte(unsigned long pte)
205 {
206         return pte & PT_PRESENT_MASK;
207 }
208
209 static int is_shadow_present_pte(u64 pte)
210 {
211         return pte != shadow_trap_nonpresent_pte
212                 && pte != shadow_notrap_nonpresent_pte;
213 }
214
215 static int is_large_pte(u64 pte)
216 {
217         return pte & PT_PAGE_SIZE_MASK;
218 }
219
220 static int is_writeble_pte(unsigned long pte)
221 {
222         return pte & PT_WRITABLE_MASK;
223 }
224
225 static int is_dirty_pte(unsigned long pte)
226 {
227         return pte & shadow_dirty_mask;
228 }
229
230 static int is_rmap_pte(u64 pte)
231 {
232         return is_shadow_present_pte(pte);
233 }
234
235 static pfn_t spte_to_pfn(u64 pte)
236 {
237         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
238 }
239
240 static gfn_t pse36_gfn_delta(u32 gpte)
241 {
242         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
243
244         return (gpte & PT32_DIR_PSE36_MASK) << shift;
245 }
246
247 static void set_shadow_pte(u64 *sptep, u64 spte)
248 {
249 #ifdef CONFIG_X86_64
250         set_64bit((unsigned long *)sptep, spte);
251 #else
252         set_64bit((unsigned long long *)sptep, spte);
253 #endif
254 }
255
256 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
257                                   struct kmem_cache *base_cache, int min)
258 {
259         void *obj;
260
261         if (cache->nobjs >= min)
262                 return 0;
263         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
264                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
265                 if (!obj)
266                         return -ENOMEM;
267                 cache->objects[cache->nobjs++] = obj;
268         }
269         return 0;
270 }
271
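The cache above is a simple pre-allocation pattern: fill it up to a fixed capacity while sleeping allocations are still allowed, then pop objects later without risk of failure. A userspace sketch of the same idea (the ex_* names and the capacity of 8 are assumptions, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

#define EX_NR_OBJS 8   /* assumed capacity, like the objects[] array */

struct ex_cache { int nobjs; void *objects[EX_NR_OBJS]; };

/* fill the cache before taking locks, like mmu_topup_memory_caches() */
static int ex_topup(struct ex_cache *c, int min)
{
        if (c->nobjs >= min)
                return 0;
        while (c->nobjs < EX_NR_OBJS) {
                void *obj = calloc(1, 64);

                if (!obj)
                        return -1;
                c->objects[c->nobjs++] = obj;
        }
        return 0;
}

/* pop a pre-allocated object; cannot fail once topped up */
static void *ex_alloc(struct ex_cache *c)
{
        return c->objects[--c->nobjs];
}

int main(void)
{
        struct ex_cache c = { 0 };
        void *obj;

        if (ex_topup(&c, 4))
                return 1;
        obj = ex_alloc(&c);
        printf("got %p, %d left\n", obj, c.nobjs);
        free(obj);
        while (c.nobjs)
                free(c.objects[--c.nobjs]);
        return 0;
}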
272 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
273 {
274         while (mc->nobjs)
275                 kfree(mc->objects[--mc->nobjs]);
276 }
277
278 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
279                                        int min)
280 {
281         struct page *page;
282
283         if (cache->nobjs >= min)
284                 return 0;
285         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
286                 page = alloc_page(GFP_KERNEL);
287                 if (!page)
288                         return -ENOMEM;
289                 set_page_private(page, 0);
290                 cache->objects[cache->nobjs++] = page_address(page);
291         }
292         return 0;
293 }
294
295 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
296 {
297         while (mc->nobjs)
298                 free_page((unsigned long)mc->objects[--mc->nobjs]);
299 }
300
301 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
302 {
303         int r;
304
305         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
306                                    pte_chain_cache, 4);
307         if (r)
308                 goto out;
309         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
310                                    rmap_desc_cache, 1);
311         if (r)
312                 goto out;
313         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
314         if (r)
315                 goto out;
316         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
317                                    mmu_page_header_cache, 4);
318 out:
319         return r;
320 }
321
322 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
323 {
324         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
325         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
326         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
327         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
328 }
329
330 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
331                                     size_t size)
332 {
333         void *p;
334
335         BUG_ON(!mc->nobjs);
336         p = mc->objects[--mc->nobjs];
337         memset(p, 0, size);
338         return p;
339 }
340
341 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
342 {
343         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
344                                       sizeof(struct kvm_pte_chain));
345 }
346
347 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
348 {
349         kfree(pc);
350 }
351
352 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
353 {
354         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
355                                       sizeof(struct kvm_rmap_desc));
356 }
357
358 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
359 {
360         kfree(rd);
361 }
362
363 /*
364  * Return the pointer to the largepage write count for a given
365  * gfn, handling slots that are not large page aligned.
366  */
367 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
368 {
369         unsigned long idx;
370
371         idx = (gfn / KVM_PAGES_PER_HPAGE) -
372               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
373         return &slot->lpage_info[idx].write_count;
374 }
375
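For illustration, a small sketch of the index arithmetic above, which stays correct even when a slot's base_gfn is not huge-page aligned (KVM_PAGES_PER_HPAGE is assumed to be 512 here, i.e. 2 MiB huge pages over 4 KiB base pages; the ex_* names are illustrative):

#include <stdio.h>

#define EX_PAGES_PER_HPAGE 512   /* assumed value: 2 MiB / 4 KiB */

/* same arithmetic as slot_largepage_idx() */
static unsigned long ex_lpage_idx(unsigned long gfn, unsigned long base_gfn)
{
        return (gfn / EX_PAGES_PER_HPAGE) - (base_gfn / EX_PAGES_PER_HPAGE);
}

int main(void)
{
        /* a slot whose base_gfn is not huge-page aligned */
        printf("idx = %lu\n", ex_lpage_idx(0x100a20, 0x100300));
        return 0;
}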
376 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
377 {
378         int *write_count;
379
380         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
381         *write_count += 1;
382 }
383
384 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
385 {
386         int *write_count;
387
388         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
389         *write_count -= 1;
390         WARN_ON(*write_count < 0);
391 }
392
393 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
394 {
395         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
396         int *largepage_idx;
397
398         if (slot) {
399                 largepage_idx = slot_largepage_idx(gfn, slot);
400                 return *largepage_idx;
401         }
402
403         return 1;
404 }
405
406 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
407 {
408         struct vm_area_struct *vma;
409         unsigned long addr;
410         int ret = 0;
411
412         addr = gfn_to_hva(kvm, gfn);
413         if (kvm_is_error_hva(addr))
414                 return ret;
415
416         down_read(&current->mm->mmap_sem);
417         vma = find_vma(current->mm, addr);
418         if (vma && is_vm_hugetlb_page(vma))
419                 ret = 1;
420         up_read(&current->mm->mmap_sem);
421
422         return ret;
423 }
424
425 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
426 {
427         struct kvm_memory_slot *slot;
428
429         if (has_wrprotected_page(vcpu->kvm, large_gfn))
430                 return 0;
431
432         if (!host_largepage_backed(vcpu->kvm, large_gfn))
433                 return 0;
434
435         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
436         if (slot && slot->dirty_bitmap)
437                 return 0;
438
439         return 1;
440 }
441
442 /*
443  * Take gfn and return the reverse mapping to it.
444  * Note: gfn must be unaliased before this function gets called.
445  */
446
447 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
448 {
449         struct kvm_memory_slot *slot;
450         unsigned long idx;
451
452         slot = gfn_to_memslot(kvm, gfn);
453         if (!lpage)
454                 return &slot->rmap[gfn - slot->base_gfn];
455
456         idx = (gfn / KVM_PAGES_PER_HPAGE) -
457               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
458
459         return &slot->lpage_info[idx].rmap_pde;
460 }
461
462 /*
463  * Reverse mapping data structures:
464  *
465  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
466  * that points to page_address(page).
467  *
468  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
469  * containing more mappings.
470  */
471 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
472 {
473         struct kvm_mmu_page *sp;
474         struct kvm_rmap_desc *desc;
475         unsigned long *rmapp;
476         int i;
477
478         if (!is_rmap_pte(*spte))
479                 return;
480         gfn = unalias_gfn(vcpu->kvm, gfn);
481         sp = page_header(__pa(spte));
482         sp->gfns[spte - sp->spt] = gfn;
483         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
484         if (!*rmapp) {
485                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
486                 *rmapp = (unsigned long)spte;
487         } else if (!(*rmapp & 1)) {
488                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
489                 desc = mmu_alloc_rmap_desc(vcpu);
490                 desc->shadow_ptes[0] = (u64 *)*rmapp;
491                 desc->shadow_ptes[1] = spte;
492                 *rmapp = (unsigned long)desc | 1;
493         } else {
494                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
495                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
496                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
497                         desc = desc->more;
498                 if (desc->shadow_ptes[RMAP_EXT-1]) {
499                         desc->more = mmu_alloc_rmap_desc(vcpu);
500                         desc = desc->more;
501                 }
502                 for (i = 0; desc->shadow_ptes[i]; ++i)
503                         ;
504                 desc->shadow_ptes[i] = spte;
505         }
506 }
507
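The pointer-tagging scheme described in the comment before rmap_add() can be shown in isolation: bit zero of the rmap word selects between a single spte pointer and a tagged pointer to a descriptor holding several. The struct and ex_* names below are illustrative, not the kernel types:

#include <stdio.h>
#include <stdint.h>

struct ex_desc { uint64_t *sptes[4]; struct ex_desc *more; };

/* decode an rmap word the same way rmap_add()/rmap_remove() do */
static void ex_decode(unsigned long rmapp)
{
        if (!rmapp)
                printf("empty\n");
        else if (!(rmapp & 1))
                printf("single spte at %p\n", (void *)rmapp);
        else
                printf("descriptor list at %p\n", (void *)(rmapp & ~1ul));
}

int main(void)
{
        uint64_t spte = 0;
        struct ex_desc d = { { &spte }, NULL };

        ex_decode(0);                            /* no mappings */
        ex_decode((unsigned long)&spte);         /* the 0 -> 1 case */
        ex_decode((unsigned long)&d | 1);        /* the 1 -> many case */
        return 0;
}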
508 static void rmap_desc_remove_entry(unsigned long *rmapp,
509                                    struct kvm_rmap_desc *desc,
510                                    int i,
511                                    struct kvm_rmap_desc *prev_desc)
512 {
513         int j;
514
515         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
516                 ;
517         desc->shadow_ptes[i] = desc->shadow_ptes[j];
518         desc->shadow_ptes[j] = NULL;
519         if (j != 0)
520                 return;
521         if (!prev_desc && !desc->more)
522                 *rmapp = (unsigned long)desc->shadow_ptes[0];
523         else
524                 if (prev_desc)
525                         prev_desc->more = desc->more;
526                 else
527                         *rmapp = (unsigned long)desc->more | 1;
528         mmu_free_rmap_desc(desc);
529 }
530
531 static void rmap_remove(struct kvm *kvm, u64 *spte)
532 {
533         struct kvm_rmap_desc *desc;
534         struct kvm_rmap_desc *prev_desc;
535         struct kvm_mmu_page *sp;
536         pfn_t pfn;
537         unsigned long *rmapp;
538         int i;
539
540         if (!is_rmap_pte(*spte))
541                 return;
542         sp = page_header(__pa(spte));
543         pfn = spte_to_pfn(*spte);
544         if (*spte & shadow_accessed_mask)
545                 kvm_set_pfn_accessed(pfn);
546         if (is_writeble_pte(*spte))
547                 kvm_release_pfn_dirty(pfn);
548         else
549                 kvm_release_pfn_clean(pfn);
550         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
551         if (!*rmapp) {
552                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
553                 BUG();
554         } else if (!(*rmapp & 1)) {
555                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
556                 if ((u64 *)*rmapp != spte) {
557                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
558                                spte, *spte);
559                         BUG();
560                 }
561                 *rmapp = 0;
562         } else {
563                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
564                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
565                 prev_desc = NULL;
566                 while (desc) {
567                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
568                                 if (desc->shadow_ptes[i] == spte) {
569                                         rmap_desc_remove_entry(rmapp,
570                                                                desc, i,
571                                                                prev_desc);
572                                         return;
573                                 }
574                         prev_desc = desc;
575                         desc = desc->more;
576                 }
577                 BUG();
578         }
579 }
580
581 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
582 {
583         struct kvm_rmap_desc *desc;
584         struct kvm_rmap_desc *prev_desc;
585         u64 *prev_spte;
586         int i;
587
588         if (!*rmapp)
589                 return NULL;
590         else if (!(*rmapp & 1)) {
591                 if (!spte)
592                         return (u64 *)*rmapp;
593                 return NULL;
594         }
595         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
596         prev_desc = NULL;
597         prev_spte = NULL;
598         while (desc) {
599                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
600                         if (prev_spte == spte)
601                                 return desc->shadow_ptes[i];
602                         prev_spte = desc->shadow_ptes[i];
603                 }
604                 desc = desc->more;
605         }
606         return NULL;
607 }
608
609 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
610 {
611         unsigned long *rmapp;
612         u64 *spte;
613         int write_protected = 0;
614
615         gfn = unalias_gfn(kvm, gfn);
616         rmapp = gfn_to_rmap(kvm, gfn, 0);
617
618         spte = rmap_next(kvm, rmapp, NULL);
619         while (spte) {
620                 BUG_ON(!spte);
621                 BUG_ON(!(*spte & PT_PRESENT_MASK));
622                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
623                 if (is_writeble_pte(*spte)) {
624                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
625                         write_protected = 1;
626                 }
627                 spte = rmap_next(kvm, rmapp, spte);
628         }
629         if (write_protected) {
630                 pfn_t pfn;
631
632                 spte = rmap_next(kvm, rmapp, NULL);
633                 pfn = spte_to_pfn(*spte);
634                 kvm_set_pfn_dirty(pfn);
635         }
636
637         /* check for huge page mappings */
638         rmapp = gfn_to_rmap(kvm, gfn, 1);
639         spte = rmap_next(kvm, rmapp, NULL);
640         while (spte) {
641                 BUG_ON(!spte);
642                 BUG_ON(!(*spte & PT_PRESENT_MASK));
643                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
644                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
645                 if (is_writeble_pte(*spte)) {
646                         rmap_remove(kvm, spte);
647                         --kvm->stat.lpages;
648                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
649                         spte = NULL;
650                         write_protected = 1;
651                 }
652                 spte = rmap_next(kvm, rmapp, spte);
653         }
654
655         if (write_protected)
656                 kvm_flush_remote_tlbs(kvm);
657
658         account_shadowed(kvm, gfn);
659 }
660
661 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
662 {
663         u64 *spte;
664         int need_tlb_flush = 0;
665
666         while ((spte = rmap_next(kvm, rmapp, NULL))) {
667                 BUG_ON(!(*spte & PT_PRESENT_MASK));
668                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
669                 rmap_remove(kvm, spte);
670                 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
671                 need_tlb_flush = 1;
672         }
673         return need_tlb_flush;
674 }
675
676 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
677                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
678 {
679         int i;
680         int retval = 0;
681
682         /*
683          * If mmap_sem isn't taken, we can look at the memslots with only
684          * the mmu_lock by skipping over the slots with userspace_addr == 0.
685          */
686         for (i = 0; i < kvm->nmemslots; i++) {
687                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
688                 unsigned long start = memslot->userspace_addr;
689                 unsigned long end;
690
691                 /* mmu_lock protects userspace_addr */
692                 if (!start)
693                         continue;
694
695                 end = start + (memslot->npages << PAGE_SHIFT);
696                 if (hva >= start && hva < end) {
697                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
698                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
699                         retval |= handler(kvm,
700                                           &memslot->lpage_info[
701                                                   gfn_offset /
702                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
703                 }
704         }
705
706         return retval;
707 }
708
709 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
710 {
711         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
712 }
713
714 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
715 {
716         u64 *spte;
717         int young = 0;
718
719         /* always return old for EPT */
720         if (!shadow_accessed_mask)
721                 return 0;
722
723         spte = rmap_next(kvm, rmapp, NULL);
724         while (spte) {
725                 int _young;
726                 u64 _spte = *spte;
727                 BUG_ON(!(_spte & PT_PRESENT_MASK));
728                 _young = _spte & PT_ACCESSED_MASK;
729                 if (_young) {
730                         young = 1;
731                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
732                 }
733                 spte = rmap_next(kvm, rmapp, spte);
734         }
735         return young;
736 }
737
738 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
739 {
740         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
741 }
742
743 #ifdef MMU_DEBUG
744 static int is_empty_shadow_page(u64 *spt)
745 {
746         u64 *pos;
747         u64 *end;
748
749         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
750                 if (is_shadow_present_pte(*pos)) {
751                         printk(KERN_ERR "%s: %p %llx\n", __func__,
752                                pos, *pos);
753                         return 0;
754                 }
755         return 1;
756 }
757 #endif
758
759 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
760 {
761         ASSERT(is_empty_shadow_page(sp->spt));
762         list_del(&sp->link);
763         __free_page(virt_to_page(sp->spt));
764         __free_page(virt_to_page(sp->gfns));
765         kfree(sp);
766         ++kvm->arch.n_free_mmu_pages;
767 }
768
769 static unsigned kvm_page_table_hashfn(gfn_t gfn)
770 {
771         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
772 }
773
774 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
775                                                u64 *parent_pte)
776 {
777         struct kvm_mmu_page *sp;
778
779         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
780         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
781         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
782         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
783         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
784         ASSERT(is_empty_shadow_page(sp->spt));
785         sp->slot_bitmap = 0;
786         sp->multimapped = 0;
787         sp->parent_pte = parent_pte;
788         --vcpu->kvm->arch.n_free_mmu_pages;
789         return sp;
790 }
791
792 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
793                                     struct kvm_mmu_page *sp, u64 *parent_pte)
794 {
795         struct kvm_pte_chain *pte_chain;
796         struct hlist_node *node;
797         int i;
798
799         if (!parent_pte)
800                 return;
801         if (!sp->multimapped) {
802                 u64 *old = sp->parent_pte;
803
804                 if (!old) {
805                         sp->parent_pte = parent_pte;
806                         return;
807                 }
808                 sp->multimapped = 1;
809                 pte_chain = mmu_alloc_pte_chain(vcpu);
810                 INIT_HLIST_HEAD(&sp->parent_ptes);
811                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
812                 pte_chain->parent_ptes[0] = old;
813         }
814         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
815                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
816                         continue;
817                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
818                         if (!pte_chain->parent_ptes[i]) {
819                                 pte_chain->parent_ptes[i] = parent_pte;
820                                 return;
821                         }
822         }
823         pte_chain = mmu_alloc_pte_chain(vcpu);
824         BUG_ON(!pte_chain);
825         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
826         pte_chain->parent_ptes[0] = parent_pte;
827 }
828
829 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
830                                        u64 *parent_pte)
831 {
832         struct kvm_pte_chain *pte_chain;
833         struct hlist_node *node;
834         int i;
835
836         if (!sp->multimapped) {
837                 BUG_ON(sp->parent_pte != parent_pte);
838                 sp->parent_pte = NULL;
839                 return;
840         }
841         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
842                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
843                         if (!pte_chain->parent_ptes[i])
844                                 break;
845                         if (pte_chain->parent_ptes[i] != parent_pte)
846                                 continue;
847                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
848                                 && pte_chain->parent_ptes[i + 1]) {
849                                 pte_chain->parent_ptes[i]
850                                         = pte_chain->parent_ptes[i + 1];
851                                 ++i;
852                         }
853                         pte_chain->parent_ptes[i] = NULL;
854                         if (i == 0) {
855                                 hlist_del(&pte_chain->link);
856                                 mmu_free_pte_chain(pte_chain);
857                                 if (hlist_empty(&sp->parent_ptes)) {
858                                         sp->multimapped = 0;
859                                         sp->parent_pte = NULL;
860                                 }
861                         }
862                         return;
863                 }
864         BUG();
865 }
866
867
868 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
869                             mmu_parent_walk_fn fn)
870 {
871         struct kvm_pte_chain *pte_chain;
872         struct hlist_node *node;
873         struct kvm_mmu_page *parent_sp;
874         int i;
875
876         if (!sp->multimapped && sp->parent_pte) {
877                 parent_sp = page_header(__pa(sp->parent_pte));
878                 fn(vcpu, parent_sp);
879                 mmu_parent_walk(vcpu, parent_sp, fn);
880                 return;
881         }
882         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
883                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
884                         if (!pte_chain->parent_ptes[i])
885                                 break;
886                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
887                         fn(vcpu, parent_sp);
888                         mmu_parent_walk(vcpu, parent_sp, fn);
889                 }
890 }
891
892 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
893                                     struct kvm_mmu_page *sp)
894 {
895         int i;
896
897         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
898                 sp->spt[i] = shadow_trap_nonpresent_pte;
899 }
900
901 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
902                                struct kvm_mmu_page *sp)
903 {
904         return 1;
905 }
906
907 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
908 {
909 }
910
911 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
912 {
913         unsigned index;
914         struct hlist_head *bucket;
915         struct kvm_mmu_page *sp;
916         struct hlist_node *node;
917
918         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
919         index = kvm_page_table_hashfn(gfn);
920         bucket = &kvm->arch.mmu_page_hash[index];
921         hlist_for_each_entry(sp, node, bucket, hash_link)
922                 if (sp->gfn == gfn && !sp->role.metaphysical
923                     && !sp->role.invalid) {
924                         pgprintk("%s: found role %x\n",
925                                  __func__, sp->role.word);
926                         return sp;
927                 }
928         return NULL;
929 }
930
931 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
932                                              gfn_t gfn,
933                                              gva_t gaddr,
934                                              unsigned level,
935                                              int metaphysical,
936                                              unsigned access,
937                                              u64 *parent_pte)
938 {
939         union kvm_mmu_page_role role;
940         unsigned index;
941         unsigned quadrant;
942         struct hlist_head *bucket;
943         struct kvm_mmu_page *sp;
944         struct hlist_node *node;
945
946         role.word = 0;
947         role.glevels = vcpu->arch.mmu.root_level;
948         role.level = level;
949         role.metaphysical = metaphysical;
950         role.access = access;
951         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
952                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
953                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
954                 role.quadrant = quadrant;
955         }
956         pgprintk("%s: looking gfn %lx role %x\n", __func__,
957                  gfn, role.word);
958         index = kvm_page_table_hashfn(gfn);
959         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
960         hlist_for_each_entry(sp, node, bucket, hash_link)
961                 if (sp->gfn == gfn && sp->role.word == role.word) {
962                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
963                         pgprintk("%s: found\n", __func__);
964                         return sp;
965                 }
966         ++vcpu->kvm->stat.mmu_cache_miss;
967         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
968         if (!sp)
969                 return sp;
970         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
971         sp->gfn = gfn;
972         sp->role = role;
973         hlist_add_head(&sp->hash_link, bucket);
974         if (!metaphysical)
975                 rmap_write_protect(vcpu->kvm, gfn);
976         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
977                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
978         else
979                 nonpaging_prefetch_page(vcpu, sp);
980         return sp;
981 }
982
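The quadrant computed above distinguishes the multiple shadow pages that back one 32-bit guest page table (a guest table has 1024 entries, a shadow table only 512). A standalone sketch of the same arithmetic, with assumed ex_* names and constants:

#include <stdio.h>

#define EX_PAGE_SHIFT  12
#define EX_PT64_BITS   9     /* 512 entries per shadow table */
#define EX_PT32_BITS   10    /* 1024 entries per 32-bit guest table */

/* same arithmetic as the quadrant computation in kvm_mmu_get_page() */
static unsigned ex_quadrant(unsigned long gaddr, int level)
{
        unsigned q = gaddr >> (EX_PAGE_SHIFT + EX_PT64_BITS * level);

        return q & ((1u << ((EX_PT32_BITS - EX_PT64_BITS) * level)) - 1);
}

int main(void)
{
        /* a level-1 guest table spans 4 MiB but a shadow table only 2 MiB,
         * so addresses in the upper half land in quadrant 1 */
        printf("%u %u\n", ex_quadrant(0x00100000, 1), ex_quadrant(0x00300000, 1));
        return 0;
}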
983 static int walk_shadow(struct kvm_shadow_walk *walker,
984                        struct kvm_vcpu *vcpu, u64 addr)
985 {
986         hpa_t shadow_addr;
987         int level;
988         int r;
989         u64 *sptep;
990         unsigned index;
991
992         shadow_addr = vcpu->arch.mmu.root_hpa;
993         level = vcpu->arch.mmu.shadow_root_level;
994         if (level == PT32E_ROOT_LEVEL) {
995                 shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
996                 shadow_addr &= PT64_BASE_ADDR_MASK;
997                 --level;
998         }
999
1000         while (level >= PT_PAGE_TABLE_LEVEL) {
1001                 index = SHADOW_PT_INDEX(addr, level);
1002                 sptep = ((u64 *)__va(shadow_addr)) + index;
1003                 r = walker->entry(walker, vcpu, addr, sptep, level);
1004                 if (r)
1005                         return r;
1006                 shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
1007                 --level;
1008         }
1009         return 0;
1010 }
1011
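walk_shadow() is a generic top-down walk: at each level it takes the 9-bit index for the address, hands the entry to the callback, then follows the entry's address bits one level down. A stripped-down userspace sketch of that loop (the array-based ex_table and all ex_* names are assumptions, not the kernel structures):

#include <stdio.h>
#include <stdint.h>

#define EX_LEVEL_BITS 9
#define EX_PAGE_SHIFT 12

/* one 512-entry table per level; entries hold a child pointer or 0 */
struct ex_table { uint64_t entry[1 << EX_LEVEL_BITS]; };

static void ex_walk(struct ex_table *root, uint64_t addr, int top_level)
{
        struct ex_table *table = root;
        int level;

        for (level = top_level; level >= 1 && table; --level) {
                unsigned shift = EX_PAGE_SHIFT + (level - 1) * EX_LEVEL_BITS;
                unsigned index = (addr >> shift) & ((1 << EX_LEVEL_BITS) - 1);

                printf("level %d index %u entry %llx\n", level, index,
                       (unsigned long long)table->entry[index]);
                /* the real walker masks out non-address bits of the entry;
                 * here we simply stop when we reach a zero entry */
                table = (struct ex_table *)(uintptr_t)table->entry[index];
        }
}

int main(void)
{
        static struct ex_table l4, l3;

        l4.entry[0x12] = (uint64_t)(uintptr_t)&l3;
        ex_walk(&l4, (uint64_t)0x12ULL << 39, 4);
        return 0;
}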
1012 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1013                                          struct kvm_mmu_page *sp)
1014 {
1015         unsigned i;
1016         u64 *pt;
1017         u64 ent;
1018
1019         pt = sp->spt;
1020
1021         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1022                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1023                         if (is_shadow_present_pte(pt[i]))
1024                                 rmap_remove(kvm, &pt[i]);
1025                         pt[i] = shadow_trap_nonpresent_pte;
1026                 }
1027                 return;
1028         }
1029
1030         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1031                 ent = pt[i];
1032
1033                 if (is_shadow_present_pte(ent)) {
1034                         if (!is_large_pte(ent)) {
1035                                 ent &= PT64_BASE_ADDR_MASK;
1036                                 mmu_page_remove_parent_pte(page_header(ent),
1037                                                            &pt[i]);
1038                         } else {
1039                                 --kvm->stat.lpages;
1040                                 rmap_remove(kvm, &pt[i]);
1041                         }
1042                 }
1043                 pt[i] = shadow_trap_nonpresent_pte;
1044         }
1045 }
1046
1047 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1048 {
1049         mmu_page_remove_parent_pte(sp, parent_pte);
1050 }
1051
1052 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1053 {
1054         int i;
1055
1056         for (i = 0; i < KVM_MAX_VCPUS; ++i)
1057                 if (kvm->vcpus[i])
1058                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
1059 }
1060
1061 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1062 {
1063         u64 *parent_pte;
1064
1065         while (sp->multimapped || sp->parent_pte) {
1066                 if (!sp->multimapped)
1067                         parent_pte = sp->parent_pte;
1068                 else {
1069                         struct kvm_pte_chain *chain;
1070
1071                         chain = container_of(sp->parent_ptes.first,
1072                                              struct kvm_pte_chain, link);
1073                         parent_pte = chain->parent_ptes[0];
1074                 }
1075                 BUG_ON(!parent_pte);
1076                 kvm_mmu_put_page(sp, parent_pte);
1077                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
1078         }
1079 }
1080
1081 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1082 {
1083         ++kvm->stat.mmu_shadow_zapped;
1084         kvm_mmu_page_unlink_children(kvm, sp);
1085         kvm_mmu_unlink_parents(kvm, sp);
1086         kvm_flush_remote_tlbs(kvm);
1087         if (!sp->role.invalid && !sp->role.metaphysical)
1088                 unaccount_shadowed(kvm, sp->gfn);
1089         if (!sp->root_count) {
1090                 hlist_del(&sp->hash_link);
1091                 kvm_mmu_free_page(kvm, sp);
1092         } else {
1093                 sp->role.invalid = 1;
1094                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1095                 kvm_reload_remote_mmus(kvm);
1096         }
1097         kvm_mmu_reset_last_pte_updated(kvm);
1098         return 0;
1099 }
1100
1101 /*
1102  * Change the number of mmu pages allocated to the vm.
1103  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
1104  */
1105 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1106 {
1107         /*
1108          * If we set the number of mmu pages to be smaller than the
1109          * number of active pages, we must free some mmu pages before we
1110          * change the value.
1111          */
1112
1113         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
1114             kvm_nr_mmu_pages) {
1115                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
1116                                        - kvm->arch.n_free_mmu_pages;
1117
1118                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
1119                         struct kvm_mmu_page *page;
1120
1121                         page = container_of(kvm->arch.active_mmu_pages.prev,
1122                                             struct kvm_mmu_page, link);
1123                         kvm_mmu_zap_page(kvm, page);
1124                         n_used_mmu_pages--;
1125                 }
1126                 kvm->arch.n_free_mmu_pages = 0;
1127         }
1128         else
1129                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1130                                          - kvm->arch.n_alloc_mmu_pages;
1131
1132         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1133 }
1134
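A toy model of the bookkeeping in kvm_mmu_change_mmu_pages(): used pages are n_alloc - n_free; shrinking below that forces pages to be zapped (omitted here) and leaves n_free at zero, otherwise the free count simply grows by the difference. Names are illustrative:

#include <stdio.h>

struct ex_mmu { int n_alloc, n_free; };

/* mirrors the accounting in kvm_mmu_change_mmu_pages(), minus the zapping */
static void ex_change_pages(struct ex_mmu *m, int new_limit)
{
        int used = m->n_alloc - m->n_free;

        if (used > new_limit) {
                /* the real code zaps shadow pages until used == new_limit */
                m->n_free = 0;
        } else {
                m->n_free += new_limit - m->n_alloc;
        }
        m->n_alloc = new_limit;
}

int main(void)
{
        struct ex_mmu m = { .n_alloc = 1024, .n_free = 100 };

        ex_change_pages(&m, 512);
        printf("alloc %d free %d\n", m.n_alloc, m.n_free);
        return 0;
}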
1135 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1136 {
1137         unsigned index;
1138         struct hlist_head *bucket;
1139         struct kvm_mmu_page *sp;
1140         struct hlist_node *node, *n;
1141         int r;
1142
1143         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1144         r = 0;
1145         index = kvm_page_table_hashfn(gfn);
1146         bucket = &kvm->arch.mmu_page_hash[index];
1147         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1148                 if (sp->gfn == gfn && !sp->role.metaphysical) {
1149                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1150                                  sp->role.word);
1151                         r = 1;
1152                         if (kvm_mmu_zap_page(kvm, sp))
1153                                 n = bucket->first;
1154                 }
1155         return r;
1156 }
1157
1158 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1159 {
1160         struct kvm_mmu_page *sp;
1161
1162         while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
1163                 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
1164                 kvm_mmu_zap_page(kvm, sp);
1165         }
1166 }
1167
1168 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1169 {
1170         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1171         struct kvm_mmu_page *sp = page_header(__pa(pte));
1172
1173         __set_bit(slot, &sp->slot_bitmap);
1174 }
1175
1176 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1177 {
1178         struct page *page;
1179
1180         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1181
1182         if (gpa == UNMAPPED_GVA)
1183                 return NULL;
1184
1185         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1186
1187         return page;
1188 }
1189
1190 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1191                     unsigned pte_access, int user_fault,
1192                     int write_fault, int dirty, int largepage,
1193                     gfn_t gfn, pfn_t pfn, bool speculative)
1194 {
1195         u64 spte;
1196         int ret = 0;
1197         /*
1198          * We don't set the accessed bit, since we sometimes want to see
1199          * whether the guest actually used the pte (in order to detect
1200          * demand paging).
1201          */
1202         spte = shadow_base_present_pte | shadow_dirty_mask;
1203         if (!speculative)
1204                 spte |= shadow_accessed_mask;
1205         if (!dirty)
1206                 pte_access &= ~ACC_WRITE_MASK;
1207         if (pte_access & ACC_EXEC_MASK)
1208                 spte |= shadow_x_mask;
1209         else
1210                 spte |= shadow_nx_mask;
1211         if (pte_access & ACC_USER_MASK)
1212                 spte |= shadow_user_mask;
1213         if (largepage)
1214                 spte |= PT_PAGE_SIZE_MASK;
1215
1216         spte |= (u64)pfn << PAGE_SHIFT;
1217
1218         if ((pte_access & ACC_WRITE_MASK)
1219             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1220                 struct kvm_mmu_page *shadow;
1221
1222                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1223                         ret = 1;
1224                         spte = shadow_trap_nonpresent_pte;
1225                         goto set_pte;
1226                 }
1227
1228                 spte |= PT_WRITABLE_MASK;
1229
1230                 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1231                 if (shadow) {
1232                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1233                                  __func__, gfn);
1234                         ret = 1;
1235                         pte_access &= ~ACC_WRITE_MASK;
1236                         if (is_writeble_pte(spte))
1237                                 spte &= ~PT_WRITABLE_MASK;
1238                 }
1239         }
1240
1241         if (pte_access & ACC_WRITE_MASK)
1242                 mark_page_dirty(vcpu->kvm, gfn);
1243
1244 set_pte:
1245         set_shadow_pte(shadow_pte, spte);
1246         return ret;
1247 }
1248
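set_spte() builds the shadow PTE by OR-ing permission/attribute masks together and shifting the host pfn into the address bits. A toy sketch of that composition follows; all EX_* mask values are placeholders, since the real shadow_* masks are configured at runtime via kvm_mmu_set_mask_ptes():

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT     12
#define EX_PRESENT_MASK   (1ull << 0)    /* placeholder bit positions */
#define EX_WRITABLE_MASK  (1ull << 1)
#define EX_USER_MASK      (1ull << 2)
#define EX_ACCESSED_MASK  (1ull << 5)
#define EX_DIRTY_MASK     (1ull << 6)

static uint64_t ex_make_spte(uint64_t pfn, int writable, int user)
{
        uint64_t spte = EX_PRESENT_MASK | EX_ACCESSED_MASK | EX_DIRTY_MASK;

        if (writable)
                spte |= EX_WRITABLE_MASK;
        if (user)
                spte |= EX_USER_MASK;
        spte |= pfn << EX_PAGE_SHIFT;    /* host frame into the address bits */
        return spte;
}

int main(void)
{
        printf("spte = %#llx\n",
               (unsigned long long)ex_make_spte(0x1234, 1, 1));
        return 0;
}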
1249
1250 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1251                          unsigned pt_access, unsigned pte_access,
1252                          int user_fault, int write_fault, int dirty,
1253                          int *ptwrite, int largepage, gfn_t gfn,
1254                          pfn_t pfn, bool speculative)
1255 {
1256         int was_rmapped = 0;
1257         int was_writeble = is_writeble_pte(*shadow_pte);
1258
1259         pgprintk("%s: spte %llx access %x write_fault %d"
1260                  " user_fault %d gfn %lx\n",
1261                  __func__, *shadow_pte, pt_access,
1262                  write_fault, user_fault, gfn);
1263
1264         if (is_rmap_pte(*shadow_pte)) {
1265                 /*
1266                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1267                  * the parent of the now unreachable PTE.
1268                  */
1269                 if (largepage && !is_large_pte(*shadow_pte)) {
1270                         struct kvm_mmu_page *child;
1271                         u64 pte = *shadow_pte;
1272
1273                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1274                         mmu_page_remove_parent_pte(child, shadow_pte);
1275                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1276                         pgprintk("hfn old %lx new %lx\n",
1277                                  spte_to_pfn(*shadow_pte), pfn);
1278                         rmap_remove(vcpu->kvm, shadow_pte);
1279                 } else {
1280                         if (largepage)
1281                                 was_rmapped = is_large_pte(*shadow_pte);
1282                         else
1283                                 was_rmapped = 1;
1284                 }
1285         }
1286         if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
1287                       dirty, largepage, gfn, pfn, speculative)) {
1288                 if (write_fault)
1289                         *ptwrite = 1;
1290                 kvm_x86_ops->tlb_flush(vcpu);
1291         }
1292
1293         pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
1294         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1295                  is_large_pte(*shadow_pte)? "2MB" : "4kB",
1296                  is_present_pte(*shadow_pte)?"RW":"R", gfn,
1297                  *shadow_pte, shadow_pte);
1298         if (!was_rmapped && is_large_pte(*shadow_pte))
1299                 ++vcpu->kvm->stat.lpages;
1300
1301         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1302         if (!was_rmapped) {
1303                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1304                 if (!is_rmap_pte(*shadow_pte))
1305                         kvm_release_pfn_clean(pfn);
1306         } else {
1307                 if (was_writeble)
1308                         kvm_release_pfn_dirty(pfn);
1309                 else
1310                         kvm_release_pfn_clean(pfn);
1311         }
1312         if (speculative) {
1313                 vcpu->arch.last_pte_updated = shadow_pte;
1314                 vcpu->arch.last_pte_gfn = gfn;
1315         }
1316 }
1317
1318 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1319 {
1320 }
1321
1322 struct direct_shadow_walk {
1323         struct kvm_shadow_walk walker;
1324         pfn_t pfn;
1325         int write;
1326         int largepage;
1327         int pt_write;
1328 };
1329
1330 static int direct_map_entry(struct kvm_shadow_walk *_walk,
1331                             struct kvm_vcpu *vcpu,
1332                             u64 addr, u64 *sptep, int level)
1333 {
1334         struct direct_shadow_walk *walk =
1335                 container_of(_walk, struct direct_shadow_walk, walker);
1336         struct kvm_mmu_page *sp;
1337         gfn_t pseudo_gfn;
1338         gfn_t gfn = addr >> PAGE_SHIFT;
1339
1340         if (level == PT_PAGE_TABLE_LEVEL
1341             || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
1342                 mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
1343                              0, walk->write, 1, &walk->pt_write,
1344                              walk->largepage, gfn, walk->pfn, false);
1345                 ++vcpu->stat.pf_fixed;
1346                 return 1;
1347         }
1348
1349         if (*sptep == shadow_trap_nonpresent_pte) {
1350                 pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1351                 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
1352                                       1, ACC_ALL, sptep);
1353                 if (!sp) {
1354                         pgprintk("nonpaging_map: ENOMEM\n");
1355                         kvm_release_pfn_clean(walk->pfn);
1356                         return -ENOMEM;
1357                 }
1358
1359                 set_shadow_pte(sptep,
1360                                __pa(sp->spt)
1361                                | PT_PRESENT_MASK | PT_WRITABLE_MASK
1362                                | shadow_user_mask | shadow_x_mask);
1363         }
1364         return 0;
1365 }
1366
1367 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1368                         int largepage, gfn_t gfn, pfn_t pfn)
1369 {
1370         int r;
1371         struct direct_shadow_walk walker = {
1372                 .walker = { .entry = direct_map_entry, },
1373                 .pfn = pfn,
1374                 .largepage = largepage,
1375                 .write = write,
1376                 .pt_write = 0,
1377         };
1378
1379         r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
1380         if (r < 0)
1381                 return r;
1382         return walker.pt_write;
1383 }
1384
1385 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1386 {
1387         int r;
1388         int largepage = 0;
1389         pfn_t pfn;
1390         unsigned long mmu_seq;
1391
1392         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1393                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1394                 largepage = 1;
1395         }
1396
1397         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1398         smp_rmb();
1399         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1400
1401         /* mmio */
1402         if (is_error_pfn(pfn)) {
1403                 kvm_release_pfn_clean(pfn);
1404                 return 1;
1405         }
1406
1407         spin_lock(&vcpu->kvm->mmu_lock);
1408         if (mmu_notifier_retry(vcpu, mmu_seq))
1409                 goto out_unlock;
1410         kvm_mmu_free_some_pages(vcpu);
1411         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1412         spin_unlock(&vcpu->kvm->mmu_lock);
1413
1414
1415         return r;
1416
1417 out_unlock:
1418         spin_unlock(&vcpu->kvm->mmu_lock);
1419         kvm_release_pfn_clean(pfn);
1420         return 0;
1421 }
1422
1423
1424 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1425 {
1426         int i;
1427         struct kvm_mmu_page *sp;
1428
1429         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1430                 return;
1431         spin_lock(&vcpu->kvm->mmu_lock);
1432         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1433                 hpa_t root = vcpu->arch.mmu.root_hpa;
1434
1435                 sp = page_header(root);
1436                 --sp->root_count;
1437                 if (!sp->root_count && sp->role.invalid)
1438                         kvm_mmu_zap_page(vcpu->kvm, sp);
1439                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1440                 spin_unlock(&vcpu->kvm->mmu_lock);
1441                 return;
1442         }
1443         for (i = 0; i < 4; ++i) {
1444                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1445
1446                 if (root) {
1447                         root &= PT64_BASE_ADDR_MASK;
1448                         sp = page_header(root);
1449                         --sp->root_count;
1450                         if (!sp->root_count && sp->role.invalid)
1451                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1452                 }
1453                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1454         }
1455         spin_unlock(&vcpu->kvm->mmu_lock);
1456         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1457 }
1458
1459 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1460 {
1461         int i;
1462         gfn_t root_gfn;
1463         struct kvm_mmu_page *sp;
1464         int metaphysical = 0;
1465
1466         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1467
1468         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1469                 hpa_t root = vcpu->arch.mmu.root_hpa;
1470
1471                 ASSERT(!VALID_PAGE(root));
1472                 if (tdp_enabled)
1473                         metaphysical = 1;
1474                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1475                                       PT64_ROOT_LEVEL, metaphysical,
1476                                       ACC_ALL, NULL);
1477                 root = __pa(sp->spt);
1478                 ++sp->root_count;
1479                 vcpu->arch.mmu.root_hpa = root;
1480                 return;
1481         }
1482         metaphysical = !is_paging(vcpu);
1483         if (tdp_enabled)
1484                 metaphysical = 1;
1485         for (i = 0; i < 4; ++i) {
1486                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1487
1488                 ASSERT(!VALID_PAGE(root));
1489                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1490                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1491                                 vcpu->arch.mmu.pae_root[i] = 0;
1492                                 continue;
1493                         }
1494                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1495                 } else if (vcpu->arch.mmu.root_level == 0)
1496                         root_gfn = 0;
1497                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1498                                       PT32_ROOT_LEVEL, metaphysical,
1499                                       ACC_ALL, NULL);
1500                 root = __pa(sp->spt);
1501                 ++sp->root_count;
1502                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1503         }
1504         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1505 }
1506
1507 static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1508 {
1509 }
1510
1511 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
1512 {
1513         int i;
1514         struct kvm_mmu_page *sp;
1515
1516         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1517                 return;
1518         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1519                 hpa_t root = vcpu->arch.mmu.root_hpa;
1520                 sp = page_header(root);
1521                 mmu_sync_children(vcpu, sp);
1522                 return;
1523         }
1524         for (i = 0; i < 4; ++i) {
1525                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1526
1527                 if (root) {
1528                         root &= PT64_BASE_ADDR_MASK;
1529                         sp = page_header(root);
1530                         mmu_sync_children(vcpu, sp);
1531                 }
1532         }
1533 }
1534
1535 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
1536 {
1537         spin_lock(&vcpu->kvm->mmu_lock);
1538         mmu_sync_roots(vcpu);
1539         spin_unlock(&vcpu->kvm->mmu_lock);
1540 }
1541
1542 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1543 {
1544         return vaddr;
1545 }
1546
1547 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1548                                 u32 error_code)
1549 {
1550         gfn_t gfn;
1551         int r;
1552
1553         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
1554         r = mmu_topup_memory_caches(vcpu);
1555         if (r)
1556                 return r;
1557
1558         ASSERT(vcpu);
1559         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1560
1561         gfn = gva >> PAGE_SHIFT;
1562
1563         return nonpaging_map(vcpu, gva & PAGE_MASK,
1564                              error_code & PFERR_WRITE_MASK, gfn);
1565 }
1566
1567 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1568                                 u32 error_code)
1569 {
1570         pfn_t pfn;
1571         int r;
1572         int largepage = 0;
1573         gfn_t gfn = gpa >> PAGE_SHIFT;
1574         unsigned long mmu_seq;
1575
1576         ASSERT(vcpu);
1577         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1578
1579         r = mmu_topup_memory_caches(vcpu);
1580         if (r)
1581                 return r;
1582
1583         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1584                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1585                 largepage = 1;
1586         }
1587         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1588         smp_rmb();
1589         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1590         if (is_error_pfn(pfn)) {
1591                 kvm_release_pfn_clean(pfn);
1592                 return 1;
1593         }
1594         spin_lock(&vcpu->kvm->mmu_lock);
1595         if (mmu_notifier_retry(vcpu, mmu_seq))
1596                 goto out_unlock;
1597         kvm_mmu_free_some_pages(vcpu);
1598         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1599                          largepage, gfn, pfn);
1600         spin_unlock(&vcpu->kvm->mmu_lock);
1601
1602         return r;
1603
1604 out_unlock:
1605         spin_unlock(&vcpu->kvm->mmu_lock);
1606         kvm_release_pfn_clean(pfn);
1607         return 0;
1608 }
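
/*
 * The fault path above follows the mmu_notifier ordering used throughout
 * this file: sample mmu_notifier_seq, issue a read barrier, resolve the
 * gfn to a pfn outside of mmu_lock (gfn_to_pfn() may sleep), then recheck
 * the sequence under mmu_lock before installing anything.  A minimal
 * sketch of that pattern (the helper name is made up; kept out of the
 * build):
 */
#if 0
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned long mmu_seq = vcpu->kvm->mmu_notifier_seq;
        pfn_t pfn;

        smp_rmb();                              /* order against notifier updates */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);       /* may sleep: no lock held yet */

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq)) {
                /* an invalidation ran meanwhile; drop the pfn, let the guest retry */
                spin_unlock(&vcpu->kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return 0;
        }
        /* ... install the mapping for pfn here ... */
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 1;
}
#endif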
1609
1610 static void nonpaging_free(struct kvm_vcpu *vcpu)
1611 {
1612         mmu_free_roots(vcpu);
1613 }
1614
1615 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1616 {
1617         struct kvm_mmu *context = &vcpu->arch.mmu;
1618
1619         context->new_cr3 = nonpaging_new_cr3;
1620         context->page_fault = nonpaging_page_fault;
1621         context->gva_to_gpa = nonpaging_gva_to_gpa;
1622         context->free = nonpaging_free;
1623         context->prefetch_page = nonpaging_prefetch_page;
1624         context->sync_page = nonpaging_sync_page;
1625         context->invlpg = nonpaging_invlpg;
1626         context->root_level = 0;
1627         context->shadow_root_level = PT32E_ROOT_LEVEL;
1628         context->root_hpa = INVALID_PAGE;
1629         return 0;
1630 }
1631
1632 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1633 {
1634         ++vcpu->stat.tlb_flush;
1635         kvm_x86_ops->tlb_flush(vcpu);
1636 }
1637
1638 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1639 {
1640         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
1641         mmu_free_roots(vcpu);
1642 }
1643
1644 static void inject_page_fault(struct kvm_vcpu *vcpu,
1645                               u64 addr,
1646                               u32 err_code)
1647 {
1648         kvm_inject_page_fault(vcpu, addr, err_code);
1649 }
1650
1651 static void paging_free(struct kvm_vcpu *vcpu)
1652 {
1653         nonpaging_free(vcpu);
1654 }
1655
1656 #define PTTYPE 64
1657 #include "paging_tmpl.h"
1658 #undef PTTYPE
1659
1660 #define PTTYPE 32
1661 #include "paging_tmpl.h"
1662 #undef PTTYPE
1663
1664 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1665 {
1666         struct kvm_mmu *context = &vcpu->arch.mmu;
1667
1668         ASSERT(is_pae(vcpu));
1669         context->new_cr3 = paging_new_cr3;
1670         context->page_fault = paging64_page_fault;
1671         context->gva_to_gpa = paging64_gva_to_gpa;
1672         context->prefetch_page = paging64_prefetch_page;
1673         context->sync_page = paging64_sync_page;
1674         context->invlpg = paging64_invlpg;
1675         context->free = paging_free;
1676         context->root_level = level;
1677         context->shadow_root_level = level;
1678         context->root_hpa = INVALID_PAGE;
1679         return 0;
1680 }
1681
1682 static int paging64_init_context(struct kvm_vcpu *vcpu)
1683 {
1684         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1685 }
1686
1687 static int paging32_init_context(struct kvm_vcpu *vcpu)
1688 {
1689         struct kvm_mmu *context = &vcpu->arch.mmu;
1690
1691         context->new_cr3 = paging_new_cr3;
1692         context->page_fault = paging32_page_fault;
1693         context->gva_to_gpa = paging32_gva_to_gpa;
1694         context->free = paging_free;
1695         context->prefetch_page = paging32_prefetch_page;
1696         context->sync_page = paging32_sync_page;
1697         context->invlpg = paging32_invlpg;
1698         context->root_level = PT32_ROOT_LEVEL;
1699         context->shadow_root_level = PT32E_ROOT_LEVEL;
1700         context->root_hpa = INVALID_PAGE;
1701         return 0;
1702 }
1703
1704 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1705 {
1706         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1707 }
1708
1709 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1710 {
1711         struct kvm_mmu *context = &vcpu->arch.mmu;
1712
1713         context->new_cr3 = nonpaging_new_cr3;
1714         context->page_fault = tdp_page_fault;
1715         context->free = nonpaging_free;
1716         context->prefetch_page = nonpaging_prefetch_page;
1717         context->sync_page = nonpaging_sync_page;
1718         context->invlpg = nonpaging_invlpg;
1719         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
1720         context->root_hpa = INVALID_PAGE;
1721
1722         if (!is_paging(vcpu)) {
1723                 context->gva_to_gpa = nonpaging_gva_to_gpa;
1724                 context->root_level = 0;
1725         } else if (is_long_mode(vcpu)) {
1726                 context->gva_to_gpa = paging64_gva_to_gpa;
1727                 context->root_level = PT64_ROOT_LEVEL;
1728         } else if (is_pae(vcpu)) {
1729                 context->gva_to_gpa = paging64_gva_to_gpa;
1730                 context->root_level = PT32E_ROOT_LEVEL;
1731         } else {
1732                 context->gva_to_gpa = paging32_gva_to_gpa;
1733                 context->root_level = PT32_ROOT_LEVEL;
1734         }
1735
1736         return 0;
1737 }
1738
1739 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1740 {
1741         ASSERT(vcpu);
1742         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1743
1744         if (!is_paging(vcpu))
1745                 return nonpaging_init_context(vcpu);
1746         else if (is_long_mode(vcpu))
1747                 return paging64_init_context(vcpu);
1748         else if (is_pae(vcpu))
1749                 return paging32E_init_context(vcpu);
1750         else
1751                 return paging32_init_context(vcpu);
1752 }
1753
1754 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1755 {
1756         vcpu->arch.update_pte.pfn = bad_pfn;
1757
1758         if (tdp_enabled)
1759                 return init_kvm_tdp_mmu(vcpu);
1760         else
1761                 return init_kvm_softmmu(vcpu);
1762 }
1763
1764 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1765 {
1766         ASSERT(vcpu);
1767         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1768                 vcpu->arch.mmu.free(vcpu);
1769                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1770         }
1771 }
1772
1773 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1774 {
1775         destroy_kvm_mmu(vcpu);
1776         return init_kvm_mmu(vcpu);
1777 }
1778 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1779
1780 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1781 {
1782         int r;
1783
1784         r = mmu_topup_memory_caches(vcpu);
1785         if (r)
1786                 goto out;
1787         spin_lock(&vcpu->kvm->mmu_lock);
1788         kvm_mmu_free_some_pages(vcpu);
1789         mmu_alloc_roots(vcpu);
1790         mmu_sync_roots(vcpu);
1791         spin_unlock(&vcpu->kvm->mmu_lock);
1792         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
1793         kvm_mmu_flush_tlb(vcpu);
1794 out:
1795         return r;
1796 }
1797 EXPORT_SYMBOL_GPL(kvm_mmu_load);
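
/*
 * kvm_mmu_load() re-establishes the active root after kvm_mmu_unload()
 * or kvm_mmu_reset_context() has invalidated it; callers generally reach
 * it through a small "reload only if needed" wrapper before re-entering
 * the guest.  A sketch of such a wrapper (hypothetical name, kept out of
 * the build):
 */
#if 0
static int example_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return 0;                       /* roots still valid */
        return kvm_mmu_load(vcpu);              /* rebuild roots, load cr3, flush */
}
#endif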
1798
1799 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1800 {
1801         mmu_free_roots(vcpu);
1802 }
1803
1804 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1805                                   struct kvm_mmu_page *sp,
1806                                   u64 *spte)
1807 {
1808         u64 pte;
1809         struct kvm_mmu_page *child;
1810
1811         pte = *spte;
1812         if (is_shadow_present_pte(pte)) {
1813                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1814                     is_large_pte(pte))
1815                         rmap_remove(vcpu->kvm, spte);
1816                 else {
1817                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1818                         mmu_page_remove_parent_pte(child, spte);
1819                 }
1820         }
1821         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1822         if (is_large_pte(pte))
1823                 --vcpu->kvm->stat.lpages;
1824 }
1825
1826 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1827                                   struct kvm_mmu_page *sp,
1828                                   u64 *spte,
1829                                   const void *new)
1830 {
1831         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1832                 if (!vcpu->arch.update_pte.largepage ||
1833                     sp->role.glevels == PT32_ROOT_LEVEL) {
1834                         ++vcpu->kvm->stat.mmu_pde_zapped;
1835                         return;
1836                 }
1837         }
1838
1839         ++vcpu->kvm->stat.mmu_pte_updated;
1840         if (sp->role.glevels == PT32_ROOT_LEVEL)
1841                 paging32_update_pte(vcpu, sp, spte, new);
1842         else
1843                 paging64_update_pte(vcpu, sp, spte, new);
1844 }
1845
1846 static bool need_remote_flush(u64 old, u64 new)
1847 {
1848         if (!is_shadow_present_pte(old))
1849                 return false;
1850         if (!is_shadow_present_pte(new))
1851                 return true;
1852         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1853                 return true;
1854         old ^= PT64_NX_MASK;
1855         new ^= PT64_NX_MASK;
1856         return (old & ~new & PT64_PERM_MASK) != 0;
1857 }
1858
1859 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1860 {
1861         if (need_remote_flush(old, new))
1862                 kvm_flush_remote_tlbs(vcpu->kvm);
1863         else
1864                 kvm_mmu_flush_tlb(vcpu);
1865 }
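
/*
 * Worked examples for need_remote_flush() as used by the pte-write path:
 * other vcpus only need their TLBs flushed when an spte they may have
 * cached is removed, repointed or stripped of permissions.  The values
 * below are made up; the block is kept out of the build:
 */
#if 0
static void example_need_remote_flush(void)
{
        u64 old, new;

        /* was not present: nothing can be cached remotely -> false */
        old = shadow_trap_nonpresent_pte;
        new = 0x1234000ULL | PT_PRESENT_MASK;

        /* base address changed: stale translations may linger -> true */
        old = 0x1234000ULL | PT_PRESENT_MASK | PT_WRITABLE_MASK;
        new = 0x5678000ULL | PT_PRESENT_MASK | PT_WRITABLE_MASK;

        /* only gained rights (r/o -> r/w): stale entries stay safe -> false */
        old = 0x1234000ULL | PT_PRESENT_MASK;
        new = 0x1234000ULL | PT_PRESENT_MASK | PT_WRITABLE_MASK;
}
#endif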
1866
1867 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1868 {
1869         u64 *spte = vcpu->arch.last_pte_updated;
1870
1871         return !!(spte && (*spte & shadow_accessed_mask));
1872 }
1873
1874 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1875                                           const u8 *new, int bytes)
1876 {
1877         gfn_t gfn;
1878         int r;
1879         u64 gpte = 0;
1880         pfn_t pfn;
1881
1882         vcpu->arch.update_pte.largepage = 0;
1883
1884         if (bytes != 4 && bytes != 8)
1885                 return;
1886
1887         /*
1888          * Assume that the pte write is on a page table of the same type
1889          * as the current vcpu paging mode.  This is nearly always true
1890          * (it might be false while changing modes).  Note that it is
1891          * verified later by update_pte().
1892          */
1893         if (is_pae(vcpu)) {
1894                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1895                 if ((bytes == 4) && (gpa % 4 == 0)) {
1896                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1897                         if (r)
1898                                 return;
1899                         memcpy((void *)&gpte + (gpa % 8), new, 4);
1900                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1901                         memcpy((void *)&gpte, new, 8);
1902                 }
1903         } else {
1904                 if ((bytes == 4) && (gpa % 4 == 0))
1905                         memcpy((void *)&gpte, new, 4);
1906         }
1907         if (!is_present_pte(gpte))
1908                 return;
1909         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1910
1911         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1912                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1913                 vcpu->arch.update_pte.largepage = 1;
1914         }
1915         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
1916         smp_rmb();
1917         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1918
1919         if (is_error_pfn(pfn)) {
1920                 kvm_release_pfn_clean(pfn);
1921                 return;
1922         }
1923         vcpu->arch.update_pte.gfn = gfn;
1924         vcpu->arch.update_pte.pfn = pfn;
1925 }
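
/*
 * Example of the guess above for a PAE/64-bit guest that updates a pte
 * with two 32-bit stores: for a 4-byte write with gpa % 8 == 4 the whole
 * 8-byte pte is read back from the guest and the freshly written half is
 * overlaid on top.  All values below are made up; kept out of the build:
 */
#if 0
static void example_guess_gpte(void)
{
        u64 gpte = 0x00000000aabbc067ULL;       /* pte as read back from the guest */
        u32 new_half = 0x00000123;              /* the 4 bytes being written */
        gpa_t gpa = 0x5004;                     /* gpa % 8 == 4: the high half */

        memcpy((void *)&gpte + (gpa % 8), &new_half, 4);
        /* gpte == 0x00000123aabbc067: low half kept, high half replaced */
}
#endif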
1926
1927 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1928 {
1929         u64 *spte = vcpu->arch.last_pte_updated;
1930
1931         if (spte
1932             && vcpu->arch.last_pte_gfn == gfn
1933             && shadow_accessed_mask
1934             && !(*spte & shadow_accessed_mask)
1935             && is_shadow_present_pte(*spte))
1936                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
1937 }
1938
1939 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1940                        const u8 *new, int bytes)
1941 {
1942         gfn_t gfn = gpa >> PAGE_SHIFT;
1943         struct kvm_mmu_page *sp;
1944         struct hlist_node *node, *n;
1945         struct hlist_head *bucket;
1946         unsigned index;
1947         u64 entry, gentry;
1948         u64 *spte;
1949         unsigned offset = offset_in_page(gpa);
1950         unsigned pte_size;
1951         unsigned page_offset;
1952         unsigned misaligned;
1953         unsigned quadrant;
1954         int level;
1955         int flooded = 0;
1956         int npte;
1957         int r;
1958
1959         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
1960         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1961         spin_lock(&vcpu->kvm->mmu_lock);
1962         kvm_mmu_access_page(vcpu, gfn);
1963         kvm_mmu_free_some_pages(vcpu);
1964         ++vcpu->kvm->stat.mmu_pte_write;
1965         kvm_mmu_audit(vcpu, "pre pte write");
1966         if (gfn == vcpu->arch.last_pt_write_gfn
1967             && !last_updated_pte_accessed(vcpu)) {
1968                 ++vcpu->arch.last_pt_write_count;
1969                 if (vcpu->arch.last_pt_write_count >= 3)
1970                         flooded = 1;
1971         } else {
1972                 vcpu->arch.last_pt_write_gfn = gfn;
1973                 vcpu->arch.last_pt_write_count = 1;
1974                 vcpu->arch.last_pte_updated = NULL;
1975         }
1976         index = kvm_page_table_hashfn(gfn);
1977         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1978         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1979                 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
1980                         continue;
1981                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1982                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1983                 misaligned |= bytes < 4;
1984                 if (misaligned || flooded) {
1985                         /*
1986                          * Misaligned accesses are too much trouble to fix
1987                          * up; also, they usually indicate a page is not used
1988                          * as a page table.
1989                          *
1990                          * If we're seeing too many writes to a page,
1991                          * it may no longer be a page table, or we may be
1992                          * forking, in which case it is better to unmap the
1993                          * page.
1994                          */
1995                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1996                                  gpa, bytes, sp->role.word);
1997                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
1998                                 n = bucket->first;
1999                         ++vcpu->kvm->stat.mmu_flooded;
2000                         continue;
2001                 }
2002                 page_offset = offset;
2003                 level = sp->role.level;
2004                 npte = 1;
2005                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2006                         page_offset <<= 1;      /* 32->64 */
2007                         /*
2008                          * A 32-bit pde maps 4MB while the shadow pdes map
2009                          * only 2MB.  So we need to double the offset again
2010                          * and zap two pdes instead of one.
2011                          */
2012                         if (level == PT32_ROOT_LEVEL) {
2013                                 page_offset &= ~7; /* kill rounding error */
2014                                 page_offset <<= 1;
2015                                 npte = 2;
2016                         }
2017                         quadrant = page_offset >> PAGE_SHIFT;
2018                         page_offset &= ~PAGE_MASK;
2019                         if (quadrant != sp->role.quadrant)
2020                                 continue;
2021                 }
2022                 spte = &sp->spt[page_offset / sizeof(*spte)];
2023                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2024                         gentry = 0;
2025                         r = kvm_read_guest_atomic(vcpu->kvm,
2026                                                   gpa & ~(u64)(pte_size - 1),
2027                                                   &gentry, pte_size);
2028                         new = (const void *)&gentry;
2029                         if (r < 0)
2030                                 new = NULL;
2031                 }
2032                 while (npte--) {
2033                         entry = *spte;
2034                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2035                         if (new)
2036                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2037                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2038                         ++spte;
2039                 }
2040         }
2041         kvm_mmu_audit(vcpu, "post pte write");
2042         spin_unlock(&vcpu->kvm->mmu_lock);
2043         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2044                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2045                 vcpu->arch.update_pte.pfn = bad_pfn;
2046         }
2047 }
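
/*
 * Worked example of the 32-bit-guest adjustment in kvm_mmu_pte_write():
 * a guest pde write at byte offset 0x14 of its page table.  Each 32-bit
 * guest entry is shadowed by a 64-bit spte, so the offset doubles; a
 * top-level 32-bit pde covers 4MB while a shadow pde covers only 2MB, so
 * the offset doubles again and two sptes are zapped.  Kept out of the
 * build:
 */
#if 0
static void example_pte_write_offsets(void)
{
        unsigned page_offset = 0x14;            /* byte offset of the guest write */
        unsigned quadrant, npte = 1;
        int level = PT32_ROOT_LEVEL;            /* the write hit a top-level pde */

        page_offset <<= 1;                      /* 0x28: 32-bit -> 64-bit entries */
        if (level == PT32_ROOT_LEVEL) {
                page_offset &= ~7;              /* still 0x28 */
                page_offset <<= 1;              /* 0x50 */
                npte = 2;                       /* one guest pde -> two shadow pdes */
        }
        quadrant = page_offset >> PAGE_SHIFT;   /* 0: first shadow quadrant */
        page_offset &= ~PAGE_MASK;              /* 0x50 within that shadow page */
        /* sptes touched: spt[0x50 / 8] and spt[0x50 / 8 + 1] */
}
#endif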
2048
2049 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2050 {
2051         gpa_t gpa;
2052         int r;
2053
2054         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2055
2056         spin_lock(&vcpu->kvm->mmu_lock);
2057         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2058         spin_unlock(&vcpu->kvm->mmu_lock);
2059         return r;
2060 }
2061 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2062
2063 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2064 {
2065         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2066                 struct kvm_mmu_page *sp;
2067
2068                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2069                                   struct kvm_mmu_page, link);
2070                 kvm_mmu_zap_page(vcpu->kvm, sp);
2071                 ++vcpu->kvm->stat.mmu_recycled;
2072         }
2073 }
2074
2075 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2076 {
2077         int r;
2078         enum emulation_result er;
2079
2080         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2081         if (r < 0)
2082                 goto out;
2083
2084         if (!r) {
2085                 r = 1;
2086                 goto out;
2087         }
2088
2089         r = mmu_topup_memory_caches(vcpu);
2090         if (r)
2091                 goto out;
2092
2093         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2094
2095         switch (er) {
2096         case EMULATE_DONE:
2097                 return 1;
2098         case EMULATE_DO_MMIO:
2099                 ++vcpu->stat.mmio_exits;
2100                 return 0;
2101         case EMULATE_FAIL:
2102                 kvm_report_emulation_failure(vcpu, "pagetable");
2103                 return 1;
2104         default:
2105                 BUG();
2106         }
2107 out:
2108         return r;
2109 }
2110 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2111
2112 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2113 {
2114         spin_lock(&vcpu->kvm->mmu_lock);
2115         vcpu->arch.mmu.invlpg(vcpu, gva);
2116         spin_unlock(&vcpu->kvm->mmu_lock);
2117         kvm_mmu_flush_tlb(vcpu);
2118         ++vcpu->stat.invlpg;
2119 }
2120 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2121
2122 void kvm_enable_tdp(void)
2123 {
2124         tdp_enabled = true;
2125 }
2126 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2127
2128 void kvm_disable_tdp(void)
2129 {
2130         tdp_enabled = false;
2131 }
2132 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2133
2134 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2135 {
2136         struct kvm_mmu_page *sp;
2137
2138         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2139                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
2140                                   struct kvm_mmu_page, link);
2141                 kvm_mmu_zap_page(vcpu->kvm, sp);
2142                 cond_resched();
2143         }
2144         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2145 }
2146
2147 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2148 {
2149         struct page *page;
2150         int i;
2151
2152         ASSERT(vcpu);
2153
2154         if (vcpu->kvm->arch.n_requested_mmu_pages)
2155                 vcpu->kvm->arch.n_free_mmu_pages =
2156                                         vcpu->kvm->arch.n_requested_mmu_pages;
2157         else
2158                 vcpu->kvm->arch.n_free_mmu_pages =
2159                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2160         /*
2161          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2162          * Therefore we need to allocate shadow page tables in the first
2163          * 4GB of memory, which happens to fit the DMA32 zone.
2164          */
2165         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2166         if (!page)
2167                 goto error_1;
2168         vcpu->arch.mmu.pae_root = page_address(page);
2169         for (i = 0; i < 4; ++i)
2170                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2171
2172         return 0;
2173
2174 error_1:
2175         free_mmu_pages(vcpu);
2176         return -ENOMEM;
2177 }
2178
2179 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2180 {
2181         ASSERT(vcpu);
2182         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2183
2184         return alloc_mmu_pages(vcpu);
2185 }
2186
2187 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2188 {
2189         ASSERT(vcpu);
2190         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2191
2192         return init_kvm_mmu(vcpu);
2193 }
2194
2195 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2196 {
2197         ASSERT(vcpu);
2198
2199         destroy_kvm_mmu(vcpu);
2200         free_mmu_pages(vcpu);
2201         mmu_free_memory_caches(vcpu);
2202 }
2203
2204 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2205 {
2206         struct kvm_mmu_page *sp;
2207
2208         spin_lock(&kvm->mmu_lock);
2209         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2210                 int i;
2211                 u64 *pt;
2212
2213                 if (!test_bit(slot, &sp->slot_bitmap))
2214                         continue;
2215
2216                 pt = sp->spt;
2217                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2218                         /* avoid RMW */
2219                         if (pt[i] & PT_WRITABLE_MASK)
2220                                 pt[i] &= ~PT_WRITABLE_MASK;
2221         }
2222         kvm_flush_remote_tlbs(kvm);
2223         spin_unlock(&kvm->mmu_lock);
2224 }
2225
2226 void kvm_mmu_zap_all(struct kvm *kvm)
2227 {
2228         struct kvm_mmu_page *sp, *node;
2229
2230         spin_lock(&kvm->mmu_lock);
2231         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2232                 if (kvm_mmu_zap_page(kvm, sp))
2233                         node = container_of(kvm->arch.active_mmu_pages.next,
2234                                             struct kvm_mmu_page, link);
2235         spin_unlock(&kvm->mmu_lock);
2236
2237         kvm_flush_remote_tlbs(kvm);
2238 }
2239
2240 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2241 {
2242         struct kvm_mmu_page *page;
2243
2244         page = container_of(kvm->arch.active_mmu_pages.prev,
2245                             struct kvm_mmu_page, link);
2246         kvm_mmu_zap_page(kvm, page);
2247 }
2248
2249 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2250 {
2251         struct kvm *kvm;
2252         struct kvm *kvm_freed = NULL;
2253         int cache_count = 0;
2254
2255         spin_lock(&kvm_lock);
2256
2257         list_for_each_entry(kvm, &vm_list, vm_list) {
2258                 int npages;
2259
2260                 if (!down_read_trylock(&kvm->slots_lock))
2261                         continue;
2262                 spin_lock(&kvm->mmu_lock);
2263                 npages = kvm->arch.n_alloc_mmu_pages -
2264                          kvm->arch.n_free_mmu_pages;
2265                 cache_count += npages;
2266                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2267                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2268                         cache_count--;
2269                         kvm_freed = kvm;
2270                 }
2271                 nr_to_scan--;
2272
2273                 spin_unlock(&kvm->mmu_lock);
2274                 up_read(&kvm->slots_lock);
2275         }
2276         if (kvm_freed)
2277                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2278
2279         spin_unlock(&kvm_lock);
2280
2281         return cache_count;
2282 }
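
/*
 * mmu_shrink() reports the number of shadow pages currently allocated
 * across all VMs and, per invocation, zaps at most one page from the
 * first VM it manages to lock that has pages to give back, then rotates
 * that VM to the tail of vm_list so the cost is spread round-robin.  A
 * manual invocation would look like this (illustrative only):
 */
#if 0
static void example_manual_shrink(void)
{
        int remaining = mmu_shrink(8, GFP_KERNEL);      /* try to scan 8 objects */
        /* remaining: shadow pages still allocated across all VMs */
}
#endif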
2283
2284 static struct shrinker mmu_shrinker = {
2285         .shrink = mmu_shrink,
2286         .seeks = DEFAULT_SEEKS * 10,
2287 };
2288
2289 static void mmu_destroy_caches(void)
2290 {
2291         if (pte_chain_cache)
2292                 kmem_cache_destroy(pte_chain_cache);
2293         if (rmap_desc_cache)
2294                 kmem_cache_destroy(rmap_desc_cache);
2295         if (mmu_page_header_cache)
2296                 kmem_cache_destroy(mmu_page_header_cache);
2297 }
2298
2299 void kvm_mmu_module_exit(void)
2300 {
2301         mmu_destroy_caches();
2302         unregister_shrinker(&mmu_shrinker);
2303 }
2304
2305 int kvm_mmu_module_init(void)
2306 {
2307         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2308                                             sizeof(struct kvm_pte_chain),
2309                                             0, 0, NULL);
2310         if (!pte_chain_cache)
2311                 goto nomem;
2312         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2313                                             sizeof(struct kvm_rmap_desc),
2314                                             0, 0, NULL);
2315         if (!rmap_desc_cache)
2316                 goto nomem;
2317
2318         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2319                                                   sizeof(struct kvm_mmu_page),
2320                                                   0, 0, NULL);
2321         if (!mmu_page_header_cache)
2322                 goto nomem;
2323
2324         register_shrinker(&mmu_shrinker);
2325
2326         return 0;
2327
2328 nomem:
2329         mmu_destroy_caches();
2330         return -ENOMEM;
2331 }
2332
2333 /*
2334  * Calculate the number of mmu pages needed for kvm.
2335  */
2336 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2337 {
2338         int i;
2339         unsigned int nr_mmu_pages;
2340         unsigned int  nr_pages = 0;
2341
2342         for (i = 0; i < kvm->nmemslots; i++)
2343                 nr_pages += kvm->memslots[i].npages;
2344
2345         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2346         nr_mmu_pages = max(nr_mmu_pages,
2347                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2348
2349         return nr_mmu_pages;
2350 }
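
/*
 * Worked example of the sizing rule above, assuming
 * KVM_PERMILLE_MMU_PAGES == 20 and KVM_MIN_ALLOC_MMU_PAGES == 64 (check
 * the headers for the actual values).  Kept out of the build:
 */
#if 0
static unsigned int example_mmu_pages_for_1gb_guest(void)
{
        unsigned int nr_pages = 262144;         /* 1GB of guest memory in 4K pages */
        unsigned int nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;

        /* 262144 * 20 / 1000 == 5242, comfortably above the 64-page floor */
        return max(nr_mmu_pages, (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
}
#endif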
2351
2352 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2353                                 unsigned len)
2354 {
2355         if (len > buffer->len)
2356                 return NULL;
2357         return buffer->ptr;
2358 }
2359
2360 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2361                                 unsigned len)
2362 {
2363         void *ret;
2364
2365         ret = pv_mmu_peek_buffer(buffer, len);
2366         if (!ret)
2367                 return ret;
2368         buffer->ptr += len;
2369         buffer->len -= len;
2370         buffer->processed += len;
2371         return ret;
2372 }
2373
2374 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2375                              gpa_t addr, gpa_t value)
2376 {
2377         int bytes = 8;
2378         int r;
2379
2380         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2381                 bytes = 4;
2382
2383         r = mmu_topup_memory_caches(vcpu);
2384         if (r)
2385                 return r;
2386
2387         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2388                 return -EFAULT;
2389
2390         return 1;
2391 }
2392
2393 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2394 {
2395         kvm_x86_ops->tlb_flush(vcpu);
2396         return 1;
2397 }
2398
2399 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2400 {
2401         spin_lock(&vcpu->kvm->mmu_lock);
2402         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2403         spin_unlock(&vcpu->kvm->mmu_lock);
2404         return 1;
2405 }
2406
2407 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2408                              struct kvm_pv_mmu_op_buffer *buffer)
2409 {
2410         struct kvm_mmu_op_header *header;
2411
2412         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2413         if (!header)
2414                 return 0;
2415         switch (header->op) {
2416         case KVM_MMU_OP_WRITE_PTE: {
2417                 struct kvm_mmu_op_write_pte *wpte;
2418
2419                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2420                 if (!wpte)
2421                         return 0;
2422                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2423                                         wpte->pte_val);
2424         }
2425         case KVM_MMU_OP_FLUSH_TLB: {
2426                 struct kvm_mmu_op_flush_tlb *ftlb;
2427
2428                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2429                 if (!ftlb)
2430                         return 0;
2431                 return kvm_pv_mmu_flush_tlb(vcpu);
2432         }
2433         case KVM_MMU_OP_RELEASE_PT: {
2434                 struct kvm_mmu_op_release_pt *rpt;
2435
2436                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2437                 if (!rpt)
2438                         return 0;
2439                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2440         }
2441         default: return 0;
2442         }
2443 }
2444
2445 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2446                   gpa_t addr, unsigned long *ret)
2447 {
2448         int r;
2449         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2450
2451         buffer->ptr = buffer->buf;
2452         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2453         buffer->processed = 0;
2454
2455         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2456         if (r)
2457                 goto out;
2458
2459         while (buffer->len) {
2460                 r = kvm_pv_mmu_op_one(vcpu, buffer);
2461                 if (r < 0)
2462                         goto out;
2463                 if (r == 0)
2464                         break;
2465         }
2466
2467         r = 1;
2468 out:
2469         *ret = buffer->processed;
2470         return r;
2471 }
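
/*
 * The buffer consumed above holds a sequence of variable-length records,
 * each beginning with a struct kvm_mmu_op_header whose ->op selects the
 * operation.  Assuming the record layout declared in kvm_para.h, a guest
 * would build a single "write pte" request roughly like this (guest-side
 * sketch, not part of this file; kept out of the build):
 */
#if 0
static void example_build_pv_mmu_buffer(void *buf)
{
        struct kvm_mmu_op_write_pte *wpte = buf;

        wpte->header.op = KVM_MMU_OP_WRITE_PTE;
        wpte->pte_phys = 0x12345000ULL;         /* gpa of the pte to update */
        wpte->pte_val  = 0x23456067ULL;         /* new guest pte value */
        /* the guest then hands the buffer's gpa and length to the mmu-op hypercall */
}
#endif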
2472
2473 #ifdef AUDIT
2474
2475 static const char *audit_msg;
2476
2477 static gva_t canonicalize(gva_t gva)
2478 {
2479 #ifdef CONFIG_X86_64
2480         gva = (long long)(gva << 16) >> 16;
2481 #endif
2482         return gva;
2483 }
2484
2485 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2486                                 gva_t va, int level)
2487 {
2488         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2489         int i;
2490         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2491
2492         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2493                 u64 ent = pt[i];
2494
2495                 if (ent == shadow_trap_nonpresent_pte)
2496                         continue;
2497
2498                 va = canonicalize(va);
2499                 if (level > 1) {
2500                         if (ent == shadow_notrap_nonpresent_pte)
2501                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
2502                                        " in nonleaf level: levels %d gva %lx"
2503                                        " level %d pte %llx\n", audit_msg,
2504                                        vcpu->arch.mmu.root_level, va, level, ent);
2505
2506                         audit_mappings_page(vcpu, ent, va, level - 1);
2507                 } else {
2508                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
2509                         pfn_t pfn = gpa_to_pfn(vcpu, gpa);
2510                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
2511                         if (is_shadow_present_pte(ent)
2512                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
2513                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
2514                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
2515                                        audit_msg, vcpu->arch.mmu.root_level,
2516                                        va, gpa, hpa, ent,
2517                                        is_shadow_present_pte(ent));
2518                         else if (ent == shadow_notrap_nonpresent_pte
2519                                  && !is_error_hpa(hpa))
2520                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
2521                                        " valid guest gva %lx\n", audit_msg, va);
2522                         kvm_release_pfn_clean(pfn);
2523
2524                 }
2525         }
2526 }
2527
2528 static void audit_mappings(struct kvm_vcpu *vcpu)
2529 {
2530         unsigned i;
2531
2532         if (vcpu->arch.mmu.root_level == 4)
2533                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
2534         else
2535                 for (i = 0; i < 4; ++i)
2536                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
2537                                 audit_mappings_page(vcpu,
2538                                                     vcpu->arch.mmu.pae_root[i],
2539                                                     i << 30,
2540                                                     2);
2541 }
2542
2543 static int count_rmaps(struct kvm_vcpu *vcpu)
2544 {
2545         int nmaps = 0;
2546         int i, j, k;
2547
2548         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2549                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2550                 struct kvm_rmap_desc *d;
2551
2552                 for (j = 0; j < m->npages; ++j) {
2553                         unsigned long *rmapp = &m->rmap[j];
2554
2555                         if (!*rmapp)
2556                                 continue;
2557                         if (!(*rmapp & 1)) {
2558                                 ++nmaps;
2559                                 continue;
2560                         }
2561                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
2562                         while (d) {
2563                                 for (k = 0; k < RMAP_EXT; ++k)
2564                                         if (d->shadow_ptes[k])
2565                                                 ++nmaps;
2566                                         else
2567                                                 break;
2568                                 d = d->more;
2569                         }
2570                 }
2571         }
2572         return nmaps;
2573 }
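
/*
 * count_rmaps() relies on the rmap encoding used by this file: a slot of
 * 0 means the gfn has no shadow mappings, a non-zero slot with bit 0
 * clear is a pointer to the single spte mapping it, and a slot with bit
 * 0 set is (struct kvm_rmap_desc *) | 1, i.e. a chain of descriptors
 * each holding up to RMAP_EXT sptes.  Decoding sketch (kept out of the
 * build):
 */
#if 0
static void example_decode_rmap(unsigned long rmapp)
{
        if (!rmapp) {
                /* no shadow mappings for this gfn */
        } else if (!(rmapp & 1)) {
                u64 *spte = (u64 *)rmapp;       /* exactly one mapping */
        } else {
                struct kvm_rmap_desc *d = (struct kvm_rmap_desc *)(rmapp & ~1ul);
                /* walk d->shadow_ptes[0..RMAP_EXT-1], then follow d->more */
        }
}
#endif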
2574
2575 static int count_writable_mappings(struct kvm_vcpu *vcpu)
2576 {
2577         int nmaps = 0;
2578         struct kvm_mmu_page *sp;
2579         int i;
2580
2581         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2582                 u64 *pt = sp->spt;
2583
2584                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
2585                         continue;
2586
2587                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2588                         u64 ent = pt[i];
2589
2590                         if (!(ent & PT_PRESENT_MASK))
2591                                 continue;
2592                         if (!(ent & PT_WRITABLE_MASK))
2593                                 continue;
2594                         ++nmaps;
2595                 }
2596         }
2597         return nmaps;
2598 }
2599
2600 static void audit_rmap(struct kvm_vcpu *vcpu)
2601 {
2602         int n_rmap = count_rmaps(vcpu);
2603         int n_actual = count_writable_mappings(vcpu);
2604
2605         if (n_rmap != n_actual)
2606                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2607                        __func__, audit_msg, n_rmap, n_actual);
2608 }
2609
2610 static void audit_write_protection(struct kvm_vcpu *vcpu)
2611 {
2612         struct kvm_mmu_page *sp;
2613         struct kvm_memory_slot *slot;
2614         unsigned long *rmapp;
2615         gfn_t gfn;
2616
2617         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2618                 if (sp->role.metaphysical)
2619                         continue;
2620
2621                 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2622                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
2623                 rmapp = &slot->rmap[gfn - slot->base_gfn];
2624                 if (*rmapp)
2625                         printk(KERN_ERR "%s: (%s) shadow page has writable"
2626                                " mappings: gfn %lx role %x\n",
2627                                __func__, audit_msg, sp->gfn,
2628                                sp->role.word);
2629         }
2630 }
2631
2632 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2633 {
2634         int olddbg = dbg;
2635
2636         dbg = 0;
2637         audit_msg = msg;
2638         audit_rmap(vcpu);
2639         audit_write_protection(vcpu);
2640         audit_mappings(vcpu);
2641         dbg = olddbg;
2642 }
2643
2644 #endif