1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21
22 #include <linux/kvm_host.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/mm.h>
26 #include <linux/highmem.h>
27 #include <linux/module.h>
28 #include <linux/swap.h>
29 #include <linux/hugetlb.h>
30 #include <linux/compiler.h>
31
32 #include <asm/page.h>
33 #include <asm/cmpxchg.h>
34 #include <asm/io.h>
35 #include <asm/vmx.h>
36
37 /*
38  * When this variable is set to true it enables Two-Dimensional-Paging,
39  * where the hardware walks two page tables:
40  * 1. the guest-virtual to guest-physical translation
41  * 2. while doing 1., the guest-physical to host-physical translation
42  * If the hardware supports that, we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
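/*
 * Illustrative sketch (not part of this file, compiled out): tdp_enabled is
 * expected to be flipped by the vendor module once it has probed the
 * hardware, e.g. vmx.c when EPT is usable.  The helper and capability-check
 * names used below (kvm_enable_tdp(), kvm_disable_tdp(), cpu_has_vmx_ept())
 * are assumptions made for the example.
 */
#if 0
static __init int example_hardware_setup(void)
{
        if (cpu_has_vmx_ept())          /* hypothetical capability check */
                kvm_enable_tdp();       /* assumed setter: tdp_enabled = true */
        else
                kvm_disable_tdp();      /* fall back to shadow paging */
        return 0;
}
#endif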
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 0;
70 module_param(dbg, bool, 0644);
71 #endif
72
73 static int oos_shadow = 1;
74 module_param(oos_shadow, bool, 0644);
75
76 #ifndef MMU_DEBUG
77 #define ASSERT(x) do { } while (0)
78 #else
79 #define ASSERT(x)                                                       \
80         if (!(x)) {                                                     \
81                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
82                        __FILE__, __LINE__, #x);                         \
83         }
84 #endif
85
86 #define PT_FIRST_AVAIL_BITS_SHIFT 9
87 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
88
89 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
90
91 #define PT64_LEVEL_BITS 9
92
93 #define PT64_LEVEL_SHIFT(level) \
94                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
95
96 #define PT64_LEVEL_MASK(level) \
97                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
98
99 #define PT64_INDEX(address, level)\
100         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
101
102
103 #define PT32_LEVEL_BITS 10
104
105 #define PT32_LEVEL_SHIFT(level) \
106                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
107
108 #define PT32_LEVEL_MASK(level) \
109                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
110
111 #define PT32_INDEX(address, level)\
112         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
113
114
115 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
116 #define PT64_DIR_BASE_ADDR_MASK \
117         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
118
119 #define PT32_BASE_ADDR_MASK PAGE_MASK
120 #define PT32_DIR_BASE_ADDR_MASK \
121         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
122
123 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
124                         | PT64_NX_MASK)
125
126 #define PFERR_PRESENT_MASK (1U << 0)
127 #define PFERR_WRITE_MASK (1U << 1)
128 #define PFERR_USER_MASK (1U << 2)
129 #define PFERR_FETCH_MASK (1U << 4)
130
131 #define PT_DIRECTORY_LEVEL 2
132 #define PT_PAGE_TABLE_LEVEL 1
133
134 #define RMAP_EXT 4
135
136 #define ACC_EXEC_MASK    1
137 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
138 #define ACC_USER_MASK    PT_USER_MASK
139 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
140
141 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
142
143 struct kvm_rmap_desc {
144         u64 *shadow_ptes[RMAP_EXT];
145         struct kvm_rmap_desc *more;
146 };
147
148 struct kvm_shadow_walk_iterator {
149         u64 addr;
150         hpa_t shadow_addr;
151         int level;
152         u64 *sptep;
153         unsigned index;
154 };
155
156 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
157         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
158              shadow_walk_okay(&(_walker));                      \
159              shadow_walk_next(&(_walker)))
160
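/*
 * Walker sketch (illustrative, compiled out): the iterator above visits one
 * shadow page-table entry per level, from the root downwards, for a given
 * guest address.  example_dump_walk() is a hypothetical helper that only
 * demonstrates the idiom.
 */
#if 0
static void example_dump_walk(struct kvm_vcpu *vcpu, u64 addr)
{
        struct kvm_shadow_walk_iterator iterator;

        for_each_shadow_entry(vcpu, addr, iterator)
                pgprintk("level %d spte %llx\n",
                         iterator.level, *iterator.sptep);
}
#endif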
161
162 struct kvm_unsync_walk {
163         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
164 };
165
166 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
167
168 static struct kmem_cache *pte_chain_cache;
169 static struct kmem_cache *rmap_desc_cache;
170 static struct kmem_cache *mmu_page_header_cache;
171
172 static u64 __read_mostly shadow_trap_nonpresent_pte;
173 static u64 __read_mostly shadow_notrap_nonpresent_pte;
174 static u64 __read_mostly shadow_base_present_pte;
175 static u64 __read_mostly shadow_nx_mask;
176 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
177 static u64 __read_mostly shadow_user_mask;
178 static u64 __read_mostly shadow_accessed_mask;
179 static u64 __read_mostly shadow_dirty_mask;
180 static u64 __read_mostly shadow_mt_mask;
181
182 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
183 {
184         shadow_trap_nonpresent_pte = trap_pte;
185         shadow_notrap_nonpresent_pte = notrap_pte;
186 }
187 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
188
189 void kvm_mmu_set_base_ptes(u64 base_pte)
190 {
191         shadow_base_present_pte = base_pte;
192 }
193 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
194
195 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
196                 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
197 {
198         shadow_user_mask = user_mask;
199         shadow_accessed_mask = accessed_mask;
200         shadow_dirty_mask = dirty_mask;
201         shadow_nx_mask = nx_mask;
202         shadow_x_mask = x_mask;
203         shadow_mt_mask = mt_mask;
204 }
205 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
206
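/*
 * Usage sketch (illustrative, compiled out): a backend configures the shadow
 * PTE layout through the setters above before any guest runs.  The concrete
 * mask values below are assumptions for the example; the point is only to
 * show which argument feeds which shadow_* variable.
 */
#if 0
static void example_configure_spte_masks(void)
{
        /* bits that every present shadow pte must carry */
        kvm_mmu_set_base_ptes(PT_PRESENT_MASK);

        /* user, accessed, dirty, nx, x and memory-type bits, in that order */
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                              PT_DIRTY_MASK, PT64_NX_MASK, 0ull, 0ull);
}
#endif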
207 static int is_write_protection(struct kvm_vcpu *vcpu)
208 {
209         return vcpu->arch.cr0 & X86_CR0_WP;
210 }
211
212 static int is_cpuid_PSE36(void)
213 {
214         return 1;
215 }
216
217 static int is_nx(struct kvm_vcpu *vcpu)
218 {
219         return vcpu->arch.shadow_efer & EFER_NX;
220 }
221
222 static int is_present_pte(unsigned long pte)
223 {
224         return pte & PT_PRESENT_MASK;
225 }
226
227 static int is_shadow_present_pte(u64 pte)
228 {
229         return pte != shadow_trap_nonpresent_pte
230                 && pte != shadow_notrap_nonpresent_pte;
231 }
232
233 static int is_large_pte(u64 pte)
234 {
235         return pte & PT_PAGE_SIZE_MASK;
236 }
237
238 static int is_writeble_pte(unsigned long pte)
239 {
240         return pte & PT_WRITABLE_MASK;
241 }
242
243 static int is_dirty_pte(unsigned long pte)
244 {
245         return pte & shadow_dirty_mask;
246 }
247
248 static int is_rmap_pte(u64 pte)
249 {
250         return is_shadow_present_pte(pte);
251 }
252
253 static pfn_t spte_to_pfn(u64 pte)
254 {
255         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
256 }
257
258 static gfn_t pse36_gfn_delta(u32 gpte)
259 {
260         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
261
262         return (gpte & PT32_DIR_PSE36_MASK) << shift;
263 }
264
265 static void set_shadow_pte(u64 *sptep, u64 spte)
266 {
267 #ifdef CONFIG_X86_64
268         set_64bit((unsigned long *)sptep, spte);
269 #else
270         set_64bit((unsigned long long *)sptep, spte);
271 #endif
272 }
273
274 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
275                                   struct kmem_cache *base_cache, int min)
276 {
277         void *obj;
278
279         if (cache->nobjs >= min)
280                 return 0;
281         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
282                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
283                 if (!obj)
284                         return -ENOMEM;
285                 cache->objects[cache->nobjs++] = obj;
286         }
287         return 0;
288 }
289
290 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
291 {
292         while (mc->nobjs)
293                 kfree(mc->objects[--mc->nobjs]);
294 }
295
296 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
297                                        int min)
298 {
299         struct page *page;
300
301         if (cache->nobjs >= min)
302                 return 0;
303         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
304                 page = alloc_page(GFP_KERNEL);
305                 if (!page)
306                         return -ENOMEM;
307                 set_page_private(page, 0);
308                 cache->objects[cache->nobjs++] = page_address(page);
309         }
310         return 0;
311 }
312
313 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
314 {
315         while (mc->nobjs)
316                 free_page((unsigned long)mc->objects[--mc->nobjs]);
317 }
318
319 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
320 {
321         int r;
322
323         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
324                                    pte_chain_cache, 4);
325         if (r)
326                 goto out;
327         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
328                                    rmap_desc_cache, 4);
329         if (r)
330                 goto out;
331         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
332         if (r)
333                 goto out;
334         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
335                                    mmu_page_header_cache, 4);
336 out:
337         return r;
338 }
339
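/*
 * Usage sketch (illustrative, compiled out): the per-vcpu caches are topped
 * up before mmu_lock is taken, so that mmu_memory_cache_alloc() calls made
 * while the lock is held never need to sleep.  The fault-handler shape below
 * is an assumption used only to show the ordering.
 */
#if 0
static int example_fault_path(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_caches(vcpu);      /* may sleep, no lock held */
        if (r)
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);
        /* ... instantiate shadow entries; allocations come from the caches ... */
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 0;
}
#endif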
340 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
341 {
342         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
343         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
344         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
345         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
346 }
347
348 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
349                                     size_t size)
350 {
351         void *p;
352
353         BUG_ON(!mc->nobjs);
354         p = mc->objects[--mc->nobjs];
355         return p;
356 }
357
358 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
359 {
360         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
361                                       sizeof(struct kvm_pte_chain));
362 }
363
364 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
365 {
366         kfree(pc);
367 }
368
369 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
370 {
371         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
372                                       sizeof(struct kvm_rmap_desc));
373 }
374
375 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
376 {
377         kfree(rd);
378 }
379
380 /*
381  * Return the pointer to the largepage write count for a given
382  * gfn, handling slots that are not large page aligned.
383  */
384 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
385 {
386         unsigned long idx;
387
388         idx = (gfn / KVM_PAGES_PER_HPAGE) -
389               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
390         return &slot->lpage_info[idx].write_count;
391 }
392
393 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
394 {
395         int *write_count;
396
397         gfn = unalias_gfn(kvm, gfn);
398         write_count = slot_largepage_idx(gfn,
399                                          gfn_to_memslot_unaliased(kvm, gfn));
400         *write_count += 1;
401 }
402
403 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
404 {
405         int *write_count;
406
407         gfn = unalias_gfn(kvm, gfn);
408         write_count = slot_largepage_idx(gfn,
409                                          gfn_to_memslot_unaliased(kvm, gfn));
410         *write_count -= 1;
411         WARN_ON(*write_count < 0);
412 }
413
414 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
415 {
416         struct kvm_memory_slot *slot;
417         int *largepage_idx;
418
419         gfn = unalias_gfn(kvm, gfn);
420         slot = gfn_to_memslot_unaliased(kvm, gfn);
421         if (slot) {
422                 largepage_idx = slot_largepage_idx(gfn, slot);
423                 return *largepage_idx;
424         }
425
426         return 1;
427 }
428
429 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
430 {
431         struct vm_area_struct *vma;
432         unsigned long addr;
433         int ret = 0;
434
435         addr = gfn_to_hva(kvm, gfn);
436         if (kvm_is_error_hva(addr))
437                 return ret;
438
439         down_read(&current->mm->mmap_sem);
440         vma = find_vma(current->mm, addr);
441         if (vma && is_vm_hugetlb_page(vma))
442                 ret = 1;
443         up_read(&current->mm->mmap_sem);
444
445         return ret;
446 }
447
448 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
449 {
450         struct kvm_memory_slot *slot;
451
452         if (has_wrprotected_page(vcpu->kvm, large_gfn))
453                 return 0;
454
455         if (!host_largepage_backed(vcpu->kvm, large_gfn))
456                 return 0;
457
458         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
459         if (slot && slot->dirty_bitmap)
460                 return 0;
461
462         return 1;
463 }
464
465 /*
466  * Take gfn and return the reverse mapping to it.
467  * Note: gfn must be unaliased before this function gets called
468  */
469
470 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
471 {
472         struct kvm_memory_slot *slot;
473         unsigned long idx;
474
475         slot = gfn_to_memslot(kvm, gfn);
476         if (!lpage)
477                 return &slot->rmap[gfn - slot->base_gfn];
478
479         idx = (gfn / KVM_PAGES_PER_HPAGE) -
480               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
481
482         return &slot->lpage_info[idx].rmap_pde;
483 }
484
485 /*
486  * Reverse mapping data structures:
487  *
488  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
489  * that points to page_address(page).
490  *
491  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
492  * containing more mappings.
493  */
494 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
495 {
496         struct kvm_mmu_page *sp;
497         struct kvm_rmap_desc *desc;
498         unsigned long *rmapp;
499         int i;
500
501         if (!is_rmap_pte(*spte))
502                 return;
503         gfn = unalias_gfn(vcpu->kvm, gfn);
504         sp = page_header(__pa(spte));
505         sp->gfns[spte - sp->spt] = gfn;
506         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
507         if (!*rmapp) {
508                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
509                 *rmapp = (unsigned long)spte;
510         } else if (!(*rmapp & 1)) {
511                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
512                 desc = mmu_alloc_rmap_desc(vcpu);
513                 desc->shadow_ptes[0] = (u64 *)*rmapp;
514                 desc->shadow_ptes[1] = spte;
515                 *rmapp = (unsigned long)desc | 1;
516         } else {
517                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
518                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
519                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
520                         desc = desc->more;
521                 if (desc->shadow_ptes[RMAP_EXT-1]) {
522                         desc->more = mmu_alloc_rmap_desc(vcpu);
523                         desc = desc->more;
524                 }
525                 for (i = 0; desc->shadow_ptes[i]; ++i)
526                         ;
527                 desc->shadow_ptes[i] = spte;
528         }
529 }
530
531 static void rmap_desc_remove_entry(unsigned long *rmapp,
532                                    struct kvm_rmap_desc *desc,
533                                    int i,
534                                    struct kvm_rmap_desc *prev_desc)
535 {
536         int j;
537
538         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
539                 ;
540         desc->shadow_ptes[i] = desc->shadow_ptes[j];
541         desc->shadow_ptes[j] = NULL;
542         if (j != 0)
543                 return;
544         if (!prev_desc && !desc->more)
545                 *rmapp = (unsigned long)desc->shadow_ptes[0];
546         else
547                 if (prev_desc)
548                         prev_desc->more = desc->more;
549                 else
550                         *rmapp = (unsigned long)desc->more | 1;
551         mmu_free_rmap_desc(desc);
552 }
553
554 static void rmap_remove(struct kvm *kvm, u64 *spte)
555 {
556         struct kvm_rmap_desc *desc;
557         struct kvm_rmap_desc *prev_desc;
558         struct kvm_mmu_page *sp;
559         pfn_t pfn;
560         unsigned long *rmapp;
561         int i;
562
563         if (!is_rmap_pte(*spte))
564                 return;
565         sp = page_header(__pa(spte));
566         pfn = spte_to_pfn(*spte);
567         if (*spte & shadow_accessed_mask)
568                 kvm_set_pfn_accessed(pfn);
569         if (is_writeble_pte(*spte))
570                 kvm_release_pfn_dirty(pfn);
571         else
572                 kvm_release_pfn_clean(pfn);
573         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
574         if (!*rmapp) {
575                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
576                 BUG();
577         } else if (!(*rmapp & 1)) {
578                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
579                 if ((u64 *)*rmapp != spte) {
580                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
581                                spte, *spte);
582                         BUG();
583                 }
584                 *rmapp = 0;
585         } else {
586                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
587                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
588                 prev_desc = NULL;
589                 while (desc) {
590                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
591                                 if (desc->shadow_ptes[i] == spte) {
592                                         rmap_desc_remove_entry(rmapp,
593                                                                desc, i,
594                                                                prev_desc);
595                                         return;
596                                 }
597                         prev_desc = desc;
598                         desc = desc->more;
599                 }
600                 BUG();
601         }
602 }
603
604 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
605 {
606         struct kvm_rmap_desc *desc;
607         struct kvm_rmap_desc *prev_desc;
608         u64 *prev_spte;
609         int i;
610
611         if (!*rmapp)
612                 return NULL;
613         else if (!(*rmapp & 1)) {
614                 if (!spte)
615                         return (u64 *)*rmapp;
616                 return NULL;
617         }
618         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
619         prev_desc = NULL;
620         prev_spte = NULL;
621         while (desc) {
622                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
623                         if (prev_spte == spte)
624                                 return desc->shadow_ptes[i];
625                         prev_spte = desc->shadow_ptes[i];
626                 }
627                 desc = desc->more;
628         }
629         return NULL;
630 }
631
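/*
 * Iteration idiom sketch (illustrative, compiled out): given the encoding
 * described above rmap_add(), a reverse-mapping chain is walked by feeding
 * the previously returned shadow pte back into rmap_next(), starting from
 * NULL, exactly as rmap_write_protect() below does.
 */
#if 0
static void example_walk_rmap(struct kvm *kvm, unsigned long *rmapp)
{
        u64 *spte = rmap_next(kvm, rmapp, NULL);

        while (spte) {
                /* inspect or rewrite *spte here */
                spte = rmap_next(kvm, rmapp, spte);
        }
}
#endif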
632 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
633 {
634         unsigned long *rmapp;
635         u64 *spte;
636         int write_protected = 0;
637
638         gfn = unalias_gfn(kvm, gfn);
639         rmapp = gfn_to_rmap(kvm, gfn, 0);
640
641         spte = rmap_next(kvm, rmapp, NULL);
642         while (spte) {
643                 BUG_ON(!spte);
644                 BUG_ON(!(*spte & PT_PRESENT_MASK));
645                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
646                 if (is_writeble_pte(*spte)) {
647                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
648                         write_protected = 1;
649                 }
650                 spte = rmap_next(kvm, rmapp, spte);
651         }
652         if (write_protected) {
653                 pfn_t pfn;
654
655                 spte = rmap_next(kvm, rmapp, NULL);
656                 pfn = spte_to_pfn(*spte);
657                 kvm_set_pfn_dirty(pfn);
658         }
659
660         /* check for huge page mappings */
661         rmapp = gfn_to_rmap(kvm, gfn, 1);
662         spte = rmap_next(kvm, rmapp, NULL);
663         while (spte) {
664                 BUG_ON(!spte);
665                 BUG_ON(!(*spte & PT_PRESENT_MASK));
666                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
667                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
668                 if (is_writeble_pte(*spte)) {
669                         rmap_remove(kvm, spte);
670                         --kvm->stat.lpages;
671                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
672                         spte = NULL;
673                         write_protected = 1;
674                 }
675                 spte = rmap_next(kvm, rmapp, spte);
676         }
677
678         return write_protected;
679 }
680
681 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
682 {
683         u64 *spte;
684         int need_tlb_flush = 0;
685
686         while ((spte = rmap_next(kvm, rmapp, NULL))) {
687                 BUG_ON(!(*spte & PT_PRESENT_MASK));
688                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
689                 rmap_remove(kvm, spte);
690                 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
691                 need_tlb_flush = 1;
692         }
693         return need_tlb_flush;
694 }
695
696 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
697                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
698 {
699         int i;
700         int retval = 0;
701
702         /*
703          * If mmap_sem isn't taken, we can look at the memslots with only
704          * the mmu_lock by skipping over the slots with userspace_addr == 0.
705          */
706         for (i = 0; i < kvm->nmemslots; i++) {
707                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
708                 unsigned long start = memslot->userspace_addr;
709                 unsigned long end;
710
711                 /* mmu_lock protects userspace_addr */
712                 if (!start)
713                         continue;
714
715                 end = start + (memslot->npages << PAGE_SHIFT);
716                 if (hva >= start && hva < end) {
717                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
718                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
719                         retval |= handler(kvm,
720                                           &memslot->lpage_info[
721                                                   gfn_offset /
722                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
723                 }
724         }
725
726         return retval;
727 }
728
729 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
730 {
731         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
732 }
733
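/*
 * Caller sketch (illustrative, compiled out): kvm_unmap_hva() is the hook
 * invoked when the host unmaps a virtual address range backing guest memory;
 * if any shadow ptes were torn down, the caller is expected to flush remote
 * TLBs.  The surrounding notifier plumbing is assumed, not shown here.
 */
#if 0
static void example_notifier_invalidate(struct kvm *kvm, unsigned long hva)
{
        int flush;

        spin_lock(&kvm->mmu_lock);
        flush = kvm_unmap_hva(kvm, hva);
        spin_unlock(&kvm->mmu_lock);

        if (flush)
                kvm_flush_remote_tlbs(kvm);
}
#endif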
734 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
735 {
736         u64 *spte;
737         int young = 0;
738
739         /* always return old for EPT */
740         if (!shadow_accessed_mask)
741                 return 0;
742
743         spte = rmap_next(kvm, rmapp, NULL);
744         while (spte) {
745                 int _young;
746                 u64 _spte = *spte;
747                 BUG_ON(!(_spte & PT_PRESENT_MASK));
748                 _young = _spte & PT_ACCESSED_MASK;
749                 if (_young) {
750                         young = 1;
751                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
752                 }
753                 spte = rmap_next(kvm, rmapp, spte);
754         }
755         return young;
756 }
757
758 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
759 {
760         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
761 }
762
763 #ifdef MMU_DEBUG
764 static int is_empty_shadow_page(u64 *spt)
765 {
766         u64 *pos;
767         u64 *end;
768
769         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
770                 if (is_shadow_present_pte(*pos)) {
771                         printk(KERN_ERR "%s: %p %llx\n", __func__,
772                                pos, *pos);
773                         return 0;
774                 }
775         return 1;
776 }
777 #endif
778
779 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
780 {
781         ASSERT(is_empty_shadow_page(sp->spt));
782         list_del(&sp->link);
783         __free_page(virt_to_page(sp->spt));
784         __free_page(virt_to_page(sp->gfns));
785         kfree(sp);
786         ++kvm->arch.n_free_mmu_pages;
787 }
788
789 static unsigned kvm_page_table_hashfn(gfn_t gfn)
790 {
791         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
792 }
793
794 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
795                                                u64 *parent_pte)
796 {
797         struct kvm_mmu_page *sp;
798
799         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
800         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
801         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
802         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
803         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
804         INIT_LIST_HEAD(&sp->oos_link);
805         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
806         sp->multimapped = 0;
807         sp->parent_pte = parent_pte;
808         --vcpu->kvm->arch.n_free_mmu_pages;
809         return sp;
810 }
811
812 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
813                                     struct kvm_mmu_page *sp, u64 *parent_pte)
814 {
815         struct kvm_pte_chain *pte_chain;
816         struct hlist_node *node;
817         int i;
818
819         if (!parent_pte)
820                 return;
821         if (!sp->multimapped) {
822                 u64 *old = sp->parent_pte;
823
824                 if (!old) {
825                         sp->parent_pte = parent_pte;
826                         return;
827                 }
828                 sp->multimapped = 1;
829                 pte_chain = mmu_alloc_pte_chain(vcpu);
830                 INIT_HLIST_HEAD(&sp->parent_ptes);
831                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
832                 pte_chain->parent_ptes[0] = old;
833         }
834         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
835                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
836                         continue;
837                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
838                         if (!pte_chain->parent_ptes[i]) {
839                                 pte_chain->parent_ptes[i] = parent_pte;
840                                 return;
841                         }
842         }
843         pte_chain = mmu_alloc_pte_chain(vcpu);
844         BUG_ON(!pte_chain);
845         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
846         pte_chain->parent_ptes[0] = parent_pte;
847 }
848
849 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
850                                        u64 *parent_pte)
851 {
852         struct kvm_pte_chain *pte_chain;
853         struct hlist_node *node;
854         int i;
855
856         if (!sp->multimapped) {
857                 BUG_ON(sp->parent_pte != parent_pte);
858                 sp->parent_pte = NULL;
859                 return;
860         }
861         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
862                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
863                         if (!pte_chain->parent_ptes[i])
864                                 break;
865                         if (pte_chain->parent_ptes[i] != parent_pte)
866                                 continue;
867                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
868                                 && pte_chain->parent_ptes[i + 1]) {
869                                 pte_chain->parent_ptes[i]
870                                         = pte_chain->parent_ptes[i + 1];
871                                 ++i;
872                         }
873                         pte_chain->parent_ptes[i] = NULL;
874                         if (i == 0) {
875                                 hlist_del(&pte_chain->link);
876                                 mmu_free_pte_chain(pte_chain);
877                                 if (hlist_empty(&sp->parent_ptes)) {
878                                         sp->multimapped = 0;
879                                         sp->parent_pte = NULL;
880                                 }
881                         }
882                         return;
883                 }
884         BUG();
885 }
886
887
888 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
889                             mmu_parent_walk_fn fn)
890 {
891         struct kvm_pte_chain *pte_chain;
892         struct hlist_node *node;
893         struct kvm_mmu_page *parent_sp;
894         int i;
895
896         if (!sp->multimapped && sp->parent_pte) {
897                 parent_sp = page_header(__pa(sp->parent_pte));
898                 fn(vcpu, parent_sp);
899                 mmu_parent_walk(vcpu, parent_sp, fn);
900                 return;
901         }
902         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
903                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
904                         if (!pte_chain->parent_ptes[i])
905                                 break;
906                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
907                         fn(vcpu, parent_sp);
908                         mmu_parent_walk(vcpu, parent_sp, fn);
909                 }
910 }
911
912 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
913 {
914         unsigned int index;
915         struct kvm_mmu_page *sp = page_header(__pa(spte));
916
917         index = spte - sp->spt;
918         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
919                 sp->unsync_children++;
920         WARN_ON(!sp->unsync_children);
921 }
922
923 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
924 {
925         struct kvm_pte_chain *pte_chain;
926         struct hlist_node *node;
927         int i;
928
929         if (!sp->parent_pte)
930                 return;
931
932         if (!sp->multimapped) {
933                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
934                 return;
935         }
936
937         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
938                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
939                         if (!pte_chain->parent_ptes[i])
940                                 break;
941                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
942                 }
943 }
944
945 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
946 {
947         kvm_mmu_update_parents_unsync(sp);
948         return 1;
949 }
950
951 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
952                                         struct kvm_mmu_page *sp)
953 {
954         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
955         kvm_mmu_update_parents_unsync(sp);
956 }
957
958 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
959                                     struct kvm_mmu_page *sp)
960 {
961         int i;
962
963         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
964                 sp->spt[i] = shadow_trap_nonpresent_pte;
965 }
966
967 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
968                                struct kvm_mmu_page *sp)
969 {
970         return 1;
971 }
972
973 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
974 {
975 }
976
977 #define KVM_PAGE_ARRAY_NR 16
978
979 struct kvm_mmu_pages {
980         struct mmu_page_and_offset {
981                 struct kvm_mmu_page *sp;
982                 unsigned int idx;
983         } page[KVM_PAGE_ARRAY_NR];
984         unsigned int nr;
985 };
986
987 #define for_each_unsync_children(bitmap, idx)           \
988         for (idx = find_first_bit(bitmap, 512);         \
989              idx < 512;                                 \
990              idx = find_next_bit(bitmap, 512, idx+1))
991
992 int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
993                    int idx)
994 {
995         int i;
996
997         if (sp->unsync)
998                 for (i=0; i < pvec->nr; i++)
999                         if (pvec->page[i].sp == sp)
1000                                 return 0;
1001
1002         pvec->page[pvec->nr].sp = sp;
1003         pvec->page[pvec->nr].idx = idx;
1004         pvec->nr++;
1005         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1006 }
1007
1008 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1009                            struct kvm_mmu_pages *pvec)
1010 {
1011         int i, ret, nr_unsync_leaf = 0;
1012
1013         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1014                 u64 ent = sp->spt[i];
1015
1016                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1017                         struct kvm_mmu_page *child;
1018                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1019
1020                         if (child->unsync_children) {
1021                                 if (mmu_pages_add(pvec, child, i))
1022                                         return -ENOSPC;
1023
1024                                 ret = __mmu_unsync_walk(child, pvec);
1025                                 if (!ret)
1026                                         __clear_bit(i, sp->unsync_child_bitmap);
1027                                 else if (ret > 0)
1028                                         nr_unsync_leaf += ret;
1029                                 else
1030                                         return ret;
1031                         }
1032
1033                         if (child->unsync) {
1034                                 nr_unsync_leaf++;
1035                                 if (mmu_pages_add(pvec, child, i))
1036                                         return -ENOSPC;
1037                         }
1038                 }
1039         }
1040
1041         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1042                 sp->unsync_children = 0;
1043
1044         return nr_unsync_leaf;
1045 }
1046
1047 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1048                            struct kvm_mmu_pages *pvec)
1049 {
1050         if (!sp->unsync_children)
1051                 return 0;
1052
1053         mmu_pages_add(pvec, sp, 0);
1054         return __mmu_unsync_walk(sp, pvec);
1055 }
1056
1057 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1058 {
1059         unsigned index;
1060         struct hlist_head *bucket;
1061         struct kvm_mmu_page *sp;
1062         struct hlist_node *node;
1063
1064         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1065         index = kvm_page_table_hashfn(gfn);
1066         bucket = &kvm->arch.mmu_page_hash[index];
1067         hlist_for_each_entry(sp, node, bucket, hash_link)
1068                 if (sp->gfn == gfn && !sp->role.direct
1069                     && !sp->role.invalid) {
1070                         pgprintk("%s: found role %x\n",
1071                                  __func__, sp->role.word);
1072                         return sp;
1073                 }
1074         return NULL;
1075 }
1076
1077 static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
1078 {
1079         list_del(&sp->oos_link);
1080         --kvm->stat.mmu_unsync_global;
1081 }
1082
1083 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1084 {
1085         WARN_ON(!sp->unsync);
1086         sp->unsync = 0;
1087         if (sp->global)
1088                 kvm_unlink_unsync_global(kvm, sp);
1089         --kvm->stat.mmu_unsync;
1090 }
1091
1092 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1093
1094 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1095 {
1096         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1097                 kvm_mmu_zap_page(vcpu->kvm, sp);
1098                 return 1;
1099         }
1100
1101         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1102                 kvm_flush_remote_tlbs(vcpu->kvm);
1103         kvm_unlink_unsync_page(vcpu->kvm, sp);
1104         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1105                 kvm_mmu_zap_page(vcpu->kvm, sp);
1106                 return 1;
1107         }
1108
1109         kvm_mmu_flush_tlb(vcpu);
1110         return 0;
1111 }
1112
1113 struct mmu_page_path {
1114         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1115         unsigned int idx[PT64_ROOT_LEVEL-1];
1116 };
1117
1118 #define for_each_sp(pvec, sp, parents, i)                       \
1119                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1120                         sp = pvec.page[i].sp;                   \
1121                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1122                         i = mmu_pages_next(&pvec, &parents, i))
1123
1124 int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
1125                    int i)
1126 {
1127         int n;
1128
1129         for (n = i+1; n < pvec->nr; n++) {
1130                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1131
1132                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1133                         parents->idx[0] = pvec->page[n].idx;
1134                         return n;
1135                 }
1136
1137                 parents->parent[sp->role.level-2] = sp;
1138                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1139         }
1140
1141         return n;
1142 }
1143
1144 void mmu_pages_clear_parents(struct mmu_page_path *parents)
1145 {
1146         struct kvm_mmu_page *sp;
1147         unsigned int level = 0;
1148
1149         do {
1150                 unsigned int idx = parents->idx[level];
1151
1152                 sp = parents->parent[level];
1153                 if (!sp)
1154                         return;
1155
1156                 --sp->unsync_children;
1157                 WARN_ON((int)sp->unsync_children < 0);
1158                 __clear_bit(idx, sp->unsync_child_bitmap);
1159                 level++;
1160         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1161 }
1162
1163 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1164                                struct mmu_page_path *parents,
1165                                struct kvm_mmu_pages *pvec)
1166 {
1167         parents->parent[parent->role.level-1] = NULL;
1168         pvec->nr = 0;
1169 }
1170
1171 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1172                               struct kvm_mmu_page *parent)
1173 {
1174         int i;
1175         struct kvm_mmu_page *sp;
1176         struct mmu_page_path parents;
1177         struct kvm_mmu_pages pages;
1178
1179         kvm_mmu_pages_init(parent, &parents, &pages);
1180         while (mmu_unsync_walk(parent, &pages)) {
1181                 int protected = 0;
1182
1183                 for_each_sp(pages, sp, parents, i)
1184                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1185
1186                 if (protected)
1187                         kvm_flush_remote_tlbs(vcpu->kvm);
1188
1189                 for_each_sp(pages, sp, parents, i) {
1190                         kvm_sync_page(vcpu, sp);
1191                         mmu_pages_clear_parents(&parents);
1192                 }
1193                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1194                 kvm_mmu_pages_init(parent, &parents, &pages);
1195         }
1196 }
1197
1198 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1199                                              gfn_t gfn,
1200                                              gva_t gaddr,
1201                                              unsigned level,
1202                                              int direct,
1203                                              unsigned access,
1204                                              u64 *parent_pte)
1205 {
1206         union kvm_mmu_page_role role;
1207         unsigned index;
1208         unsigned quadrant;
1209         struct hlist_head *bucket;
1210         struct kvm_mmu_page *sp;
1211         struct hlist_node *node, *tmp;
1212
1213         role = vcpu->arch.mmu.base_role;
1214         role.level = level;
1215         role.direct = direct;
1216         role.access = access;
1217         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1218                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1219                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1220                 role.quadrant = quadrant;
1221         }
1222         pgprintk("%s: looking gfn %lx role %x\n", __func__,
1223                  gfn, role.word);
1224         index = kvm_page_table_hashfn(gfn);
1225         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1226         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1227                 if (sp->gfn == gfn) {
1228                         if (sp->unsync)
1229                                 if (kvm_sync_page(vcpu, sp))
1230                                         continue;
1231
1232                         if (sp->role.word != role.word)
1233                                 continue;
1234
1235                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1236                         if (sp->unsync_children) {
1237                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1238                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1239                         }
1240                         pgprintk("%s: found\n", __func__);
1241                         return sp;
1242                 }
1243         ++vcpu->kvm->stat.mmu_cache_miss;
1244         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1245         if (!sp)
1246                 return sp;
1247         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1248         sp->gfn = gfn;
1249         sp->role = role;
1250         sp->global = role.cr4_pge;
1251         hlist_add_head(&sp->hash_link, bucket);
1252         if (!direct) {
1253                 if (rmap_write_protect(vcpu->kvm, gfn))
1254                         kvm_flush_remote_tlbs(vcpu->kvm);
1255                 account_shadowed(vcpu->kvm, gfn);
1256         }
1257         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1258                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1259         else
1260                 nonpaging_prefetch_page(vcpu, sp);
1261         return sp;
1262 }
1263
1264 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1265                              struct kvm_vcpu *vcpu, u64 addr)
1266 {
1267         iterator->addr = addr;
1268         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1269         iterator->level = vcpu->arch.mmu.shadow_root_level;
1270         if (iterator->level == PT32E_ROOT_LEVEL) {
1271                 iterator->shadow_addr
1272                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1273                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1274                 --iterator->level;
1275                 if (!iterator->shadow_addr)
1276                         iterator->level = 0;
1277         }
1278 }
1279
1280 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1281 {
1282         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1283                 return false;
1284         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1285         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1286         return true;
1287 }
1288
1289 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1290 {
1291         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1292         --iterator->level;
1293 }
1294
1295 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1296                                          struct kvm_mmu_page *sp)
1297 {
1298         unsigned i;
1299         u64 *pt;
1300         u64 ent;
1301
1302         pt = sp->spt;
1303
1304         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1305                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1306                         if (is_shadow_present_pte(pt[i]))
1307                                 rmap_remove(kvm, &pt[i]);
1308                         pt[i] = shadow_trap_nonpresent_pte;
1309                 }
1310                 return;
1311         }
1312
1313         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1314                 ent = pt[i];
1315
1316                 if (is_shadow_present_pte(ent)) {
1317                         if (!is_large_pte(ent)) {
1318                                 ent &= PT64_BASE_ADDR_MASK;
1319                                 mmu_page_remove_parent_pte(page_header(ent),
1320                                                            &pt[i]);
1321                         } else {
1322                                 --kvm->stat.lpages;
1323                                 rmap_remove(kvm, &pt[i]);
1324                         }
1325                 }
1326                 pt[i] = shadow_trap_nonpresent_pte;
1327         }
1328 }
1329
1330 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1331 {
1332         mmu_page_remove_parent_pte(sp, parent_pte);
1333 }
1334
1335 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1336 {
1337         int i;
1338
1339         for (i = 0; i < KVM_MAX_VCPUS; ++i)
1340                 if (kvm->vcpus[i])
1341                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
1342 }
1343
1344 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1345 {
1346         u64 *parent_pte;
1347
1348         while (sp->multimapped || sp->parent_pte) {
1349                 if (!sp->multimapped)
1350                         parent_pte = sp->parent_pte;
1351                 else {
1352                         struct kvm_pte_chain *chain;
1353
1354                         chain = container_of(sp->parent_ptes.first,
1355                                              struct kvm_pte_chain, link);
1356                         parent_pte = chain->parent_ptes[0];
1357                 }
1358                 BUG_ON(!parent_pte);
1359                 kvm_mmu_put_page(sp, parent_pte);
1360                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
1361         }
1362 }
1363
1364 static int mmu_zap_unsync_children(struct kvm *kvm,
1365                                    struct kvm_mmu_page *parent)
1366 {
1367         int i, zapped = 0;
1368         struct mmu_page_path parents;
1369         struct kvm_mmu_pages pages;
1370
1371         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1372                 return 0;
1373
1374         kvm_mmu_pages_init(parent, &parents, &pages);
1375         while (mmu_unsync_walk(parent, &pages)) {
1376                 struct kvm_mmu_page *sp;
1377
1378                 for_each_sp(pages, sp, parents, i) {
1379                         kvm_mmu_zap_page(kvm, sp);
1380                         mmu_pages_clear_parents(&parents);
1381                 }
1382                 zapped += pages.nr;
1383                 kvm_mmu_pages_init(parent, &parents, &pages);
1384         }
1385
1386         return zapped;
1387 }
1388
1389 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1390 {
1391         int ret;
1392         ++kvm->stat.mmu_shadow_zapped;
1393         ret = mmu_zap_unsync_children(kvm, sp);
1394         kvm_mmu_page_unlink_children(kvm, sp);
1395         kvm_mmu_unlink_parents(kvm, sp);
1396         kvm_flush_remote_tlbs(kvm);
1397         if (!sp->role.invalid && !sp->role.direct)
1398                 unaccount_shadowed(kvm, sp->gfn);
1399         if (sp->unsync)
1400                 kvm_unlink_unsync_page(kvm, sp);
1401         if (!sp->root_count) {
1402                 hlist_del(&sp->hash_link);
1403                 kvm_mmu_free_page(kvm, sp);
1404         } else {
1405                 sp->role.invalid = 1;
1406                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1407                 kvm_reload_remote_mmus(kvm);
1408         }
1409         kvm_mmu_reset_last_pte_updated(kvm);
1410         return ret;
1411 }
1412
1413 /*
1414  * Changing the number of mmu pages allocated to the vm
1415  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
1416  */
1417 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1418 {
1419         /*
1420          * If we set the number of mmu pages to be smaller than the
1421          * number of active pages, we must free some mmu pages before we
1422          * change the value
1423          */
1424
1425         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
1426             kvm_nr_mmu_pages) {
1427                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
1428                                        - kvm->arch.n_free_mmu_pages;
1429
1430                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
1431                         struct kvm_mmu_page *page;
1432
1433                         page = container_of(kvm->arch.active_mmu_pages.prev,
1434                                             struct kvm_mmu_page, link);
1435                         kvm_mmu_zap_page(kvm, page);
1436                         n_used_mmu_pages--;
1437                 }
1438                 kvm->arch.n_free_mmu_pages = 0;
1439         }
1440         else
1441                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1442                                          - kvm->arch.n_alloc_mmu_pages;
1443
1444         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1445 }
1446
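/*
 * Usage sketch (illustrative, compiled out): kvm_mmu_change_mmu_pages() is
 * meant to run under mmu_lock, typically from the ioctl path that lets
 * userspace resize the shadow-page pool.  The wrapper below is an assumption
 * used only to show the locking.
 */
#if 0
static void example_set_nr_mmu_pages(struct kvm *kvm, unsigned int nr_pages)
{
        spin_lock(&kvm->mmu_lock);
        kvm_mmu_change_mmu_pages(kvm, nr_pages);
        spin_unlock(&kvm->mmu_lock);
}
#endif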
1447 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1448 {
1449         unsigned index;
1450         struct hlist_head *bucket;
1451         struct kvm_mmu_page *sp;
1452         struct hlist_node *node, *n;
1453         int r;
1454
1455         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1456         r = 0;
1457         index = kvm_page_table_hashfn(gfn);
1458         bucket = &kvm->arch.mmu_page_hash[index];
1459         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1460                 if (sp->gfn == gfn && !sp->role.direct) {
1461                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1462                                  sp->role.word);
1463                         r = 1;
1464                         if (kvm_mmu_zap_page(kvm, sp))
1465                                 n = bucket->first;
1466                 }
1467         return r;
1468 }
1469
1470 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1471 {
1472         unsigned index;
1473         struct hlist_head *bucket;
1474         struct kvm_mmu_page *sp;
1475         struct hlist_node *node, *nn;
1476
1477         index = kvm_page_table_hashfn(gfn);
1478         bucket = &kvm->arch.mmu_page_hash[index];
1479         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1480                 if (sp->gfn == gfn && !sp->role.direct
1481                     && !sp->role.invalid) {
1482                         pgprintk("%s: zap %lx %x\n",
1483                                  __func__, gfn, sp->role.word);
1484                         kvm_mmu_zap_page(kvm, sp);
1485                 }
1486         }
1487 }
1488
1489 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1490 {
1491         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1492         struct kvm_mmu_page *sp = page_header(__pa(pte));
1493
1494         __set_bit(slot, sp->slot_bitmap);
1495 }
1496
1497 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1498 {
1499         int i;
1500         u64 *pt = sp->spt;
1501
1502         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1503                 return;
1504
1505         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1506                 if (pt[i] == shadow_notrap_nonpresent_pte)
1507                         set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
1508         }
1509 }
1510
1511 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1512 {
1513         struct page *page;
1514
1515         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1516
1517         if (gpa == UNMAPPED_GVA)
1518                 return NULL;
1519
1520         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1521
1522         return page;
1523 }
1524
1525 /*
1526  * The function is based on mtrr_type_lookup() in
1527  * arch/x86/kernel/cpu/mtrr/generic.c
1528  */
1529 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1530                          u64 start, u64 end)
1531 {
1532         int i;
1533         u64 base, mask;
1534         u8 prev_match, curr_match;
1535         int num_var_ranges = KVM_NR_VAR_MTRR;
1536
1537         if (!mtrr_state->enabled)
1538                 return 0xFF;
1539
1540         /* Make end inclusive, instead of exclusive */
1541         end--;
1542
1543         /* Look in fixed ranges. Just return the type as per start */
1544         if (mtrr_state->have_fixed && (start < 0x100000)) {
1545                 int idx;
1546
1547                 if (start < 0x80000) {
1548                         idx = 0;
1549                         idx += (start >> 16);
1550                         return mtrr_state->fixed_ranges[idx];
1551                 } else if (start < 0xC0000) {
1552                         idx = 1 * 8;
1553                         idx += ((start - 0x80000) >> 14);
1554                         return mtrr_state->fixed_ranges[idx];
1555                 } else if (start < 0x1000000) {
1556                         idx = 3 * 8;
1557                         idx += ((start - 0xC0000) >> 12);
1558                         return mtrr_state->fixed_ranges[idx];
1559                 }
1560         }
1561
1562         /*
1563          * Look in variable ranges
1564          * Look for multiple ranges matching this address and pick the type
1565          * as per MTRR precedence
1566          */
1567         if (!(mtrr_state->enabled & 2))
1568                 return mtrr_state->def_type;
1569
1570         prev_match = 0xFF;
1571         for (i = 0; i < num_var_ranges; ++i) {
1572                 unsigned short start_state, end_state;
1573
1574                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1575                         continue;
1576
1577                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1578                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1579                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1580                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1581
1582                 start_state = ((start & mask) == (base & mask));
1583                 end_state = ((end & mask) == (base & mask));
1584                 if (start_state != end_state)
1585                         return 0xFE;
1586
1587                 if ((start & mask) != (base & mask))
1588                         continue;
1589
1590                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1591                 if (prev_match == 0xFF) {
1592                         prev_match = curr_match;
1593                         continue;
1594                 }
1595
1596                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1597                     curr_match == MTRR_TYPE_UNCACHABLE)
1598                         return MTRR_TYPE_UNCACHABLE;
1599
1600                 if ((prev_match == MTRR_TYPE_WRBACK &&
1601                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1602                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1603                      curr_match == MTRR_TYPE_WRBACK)) {
1604                         prev_match = MTRR_TYPE_WRTHROUGH;
1605                         curr_match = MTRR_TYPE_WRTHROUGH;
1606                 }
1607
1608                 if (prev_match != curr_match)
1609                         return MTRR_TYPE_UNCACHABLE;
1610         }
1611
1612         if (prev_match != 0xFF)
1613                 return prev_match;
1614
1615         return mtrr_state->def_type;
1616 }
1617
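/*
 * Note: get_mtrr_type() returns 0xFF when MTRRs are disabled and 0xFE
 * when the range only partially overlaps a variable MTRR;
 * get_memory_type() below collapses both sentinels to write-back for
 * the single page at gfn.
 */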
1618 static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1619 {
1620         u8 mtrr;
1621
1622         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1623                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1624         if (mtrr == 0xfe || mtrr == 0xff)
1625                 mtrr = MTRR_TYPE_WRBACK;
1626         return mtrr;
1627 }
1628
1629 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1630 {
1631         unsigned index;
1632         struct hlist_head *bucket;
1633         struct kvm_mmu_page *s;
1634         struct hlist_node *node, *n;
1635
1636         index = kvm_page_table_hashfn(sp->gfn);
1637         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1638         /* don't unsync if pagetable is shadowed with multiple roles */
1639         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1640                 if (s->gfn != sp->gfn || s->role.direct)
1641                         continue;
1642                 if (s->role.word != sp->role.word)
1643                         return 1;
1644         }
1645         ++vcpu->kvm->stat.mmu_unsync;
1646         sp->unsync = 1;
1647
1648         if (sp->global) {
1649                 list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
1650                 ++vcpu->kvm->stat.mmu_unsync_global;
1651         } else
1652                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1653
1654         mmu_convert_notrap(sp);
1655         return 0;
1656 }
1657
1658 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1659                                   bool can_unsync)
1660 {
1661         struct kvm_mmu_page *shadow;
1662
1663         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1664         if (shadow) {
1665                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1666                         return 1;
1667                 if (shadow->unsync)
1668                         return 0;
1669                 if (can_unsync && oos_shadow)
1670                         return kvm_unsync_page(vcpu, shadow);
1671                 return 1;
1672         }
1673         return 0;
1674 }
1675
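/*
 * Sketch of the contract below: set_spte() returns nonzero when the
 * mapping had to be made read-only or dropped (e.g. a large page that
 * would cover a shadowed page table); mmu_set_spte() then records a
 * page-table write via *ptwrite (on a write fault) and flushes the TLB.
 */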
1676 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1677                     unsigned pte_access, int user_fault,
1678                     int write_fault, int dirty, int largepage,
1679                     int global, gfn_t gfn, pfn_t pfn, bool speculative,
1680                     bool can_unsync)
1681 {
1682         u64 spte;
1683         int ret = 0;
1684         u64 mt_mask = shadow_mt_mask;
1685         struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
1686
1687         if (!global && sp->global) {
1688                 sp->global = 0;
1689                 if (sp->unsync) {
1690                         kvm_unlink_unsync_global(vcpu->kvm, sp);
1691                         kvm_mmu_mark_parents_unsync(vcpu, sp);
1692                 }
1693         }
1694
1695         /*
1696          * We don't set the accessed bit, since we sometimes want to see
1697          * whether the guest actually used the pte (in order to detect
1698          * demand paging).
1699          */
1700         spte = shadow_base_present_pte | shadow_dirty_mask;
1701         if (!speculative)
1702                 spte |= shadow_accessed_mask;
1703         if (!dirty)
1704                 pte_access &= ~ACC_WRITE_MASK;
1705         if (pte_access & ACC_EXEC_MASK)
1706                 spte |= shadow_x_mask;
1707         else
1708                 spte |= shadow_nx_mask;
1709         if (pte_access & ACC_USER_MASK)
1710                 spte |= shadow_user_mask;
1711         if (largepage)
1712                 spte |= PT_PAGE_SIZE_MASK;
1713         if (mt_mask) {
1714                 if (!kvm_is_mmio_pfn(pfn)) {
1715                         mt_mask = get_memory_type(vcpu, gfn) <<
1716                                 kvm_x86_ops->get_mt_mask_shift();
1717                         mt_mask |= VMX_EPT_IGMT_BIT;
1718                 } else
1719                         mt_mask = MTRR_TYPE_UNCACHABLE <<
1720                                 kvm_x86_ops->get_mt_mask_shift();
1721                 spte |= mt_mask;
1722         }
1723
1724         spte |= (u64)pfn << PAGE_SHIFT;
1725
1726         if ((pte_access & ACC_WRITE_MASK)
1727             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1728
1729                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1730                         ret = 1;
1731                         spte = shadow_trap_nonpresent_pte;
1732                         goto set_pte;
1733                 }
1734
1735                 spte |= PT_WRITABLE_MASK;
1736
1737                 /*
1738                  * Optimization: for pte sync, if spte was writable the hash
1739                  * lookup is unnecessary (and expensive). Write protection
1740                  * is responsibility of mmu_get_page / kvm_sync_page.
1741                  * is the responsibility of mmu_get_page / kvm_sync_page.
1742                  */
1743                 if (!can_unsync && is_writeble_pte(*shadow_pte))
1744                         goto set_pte;
1745
1746                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1747                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1748                                  __func__, gfn);
1749                         ret = 1;
1750                         pte_access &= ~ACC_WRITE_MASK;
1751                         if (is_writeble_pte(spte))
1752                                 spte &= ~PT_WRITABLE_MASK;
1753                 }
1754         }
1755
1756         if (pte_access & ACC_WRITE_MASK)
1757                 mark_page_dirty(vcpu->kvm, gfn);
1758
1759 set_pte:
1760         set_shadow_pte(shadow_pte, spte);
1761         return ret;
1762 }
1763
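/*
 * mmu_set_spte() wraps set_spte() with the surrounding bookkeeping:
 * unlinking or rmap-removing an spte that is being overwritten, adding
 * the new spte to the rmap, releasing the pfn reference and keeping the
 * large-page statistics up to date.
 */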
1764 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1765                          unsigned pt_access, unsigned pte_access,
1766                          int user_fault, int write_fault, int dirty,
1767                          int *ptwrite, int largepage, int global,
1768                          gfn_t gfn, pfn_t pfn, bool speculative)
1769 {
1770         int was_rmapped = 0;
1771         int was_writeble = is_writeble_pte(*shadow_pte);
1772
1773         pgprintk("%s: spte %llx access %x write_fault %d"
1774                  " user_fault %d gfn %lx\n",
1775                  __func__, *shadow_pte, pt_access,
1776                  write_fault, user_fault, gfn);
1777
1778         if (is_rmap_pte(*shadow_pte)) {
1779                 /*
1780                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1781                  * the parent of the now unreachable PTE.
1782                  */
1783                 if (largepage && !is_large_pte(*shadow_pte)) {
1784                         struct kvm_mmu_page *child;
1785                         u64 pte = *shadow_pte;
1786
1787                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1788                         mmu_page_remove_parent_pte(child, shadow_pte);
1789                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1790                         pgprintk("hfn old %lx new %lx\n",
1791                                  spte_to_pfn(*shadow_pte), pfn);
1792                         rmap_remove(vcpu->kvm, shadow_pte);
1793                 } else
1794                         was_rmapped = 1;
1795         }
1796         if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
1797                       dirty, largepage, global, gfn, pfn, speculative, true)) {
1798                 if (write_fault)
1799                         *ptwrite = 1;
1800                 kvm_x86_ops->tlb_flush(vcpu);
1801         }
1802
1803         pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
1804         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1805                  is_large_pte(*shadow_pte)? "2MB" : "4kB",
1806                  is_present_pte(*shadow_pte)?"RW":"R", gfn,
1807                  *shadow_pte, shadow_pte);
1808         if (!was_rmapped && is_large_pte(*shadow_pte))
1809                 ++vcpu->kvm->stat.lpages;
1810
1811         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1812         if (!was_rmapped) {
1813                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1814                 if (!is_rmap_pte(*shadow_pte))
1815                         kvm_release_pfn_clean(pfn);
1816         } else {
1817                 if (was_writeble)
1818                         kvm_release_pfn_dirty(pfn);
1819                 else
1820                         kvm_release_pfn_clean(pfn);
1821         }
1822         if (speculative) {
1823                 vcpu->arch.last_pte_updated = shadow_pte;
1824                 vcpu->arch.last_pte_gfn = gfn;
1825         }
1826 }
1827
1828 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1829 {
1830 }
1831
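/*
 * Sketch of the walk below: descend the shadow page table for the guest
 * physical address, install the final spte at the leaf (the directory
 * level for a large page), and allocate direct shadow pages for any
 * missing intermediate levels, linking them in as present and writable.
 */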
1832 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1833                         int largepage, gfn_t gfn, pfn_t pfn)
1834 {
1835         struct kvm_shadow_walk_iterator iterator;
1836         struct kvm_mmu_page *sp;
1837         int pt_write = 0;
1838         gfn_t pseudo_gfn;
1839
1840         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1841                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1842                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1843                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1844                                      0, write, 1, &pt_write,
1845                                      largepage, 0, gfn, pfn, false);
1846                         ++vcpu->stat.pf_fixed;
1847                         break;
1848                 }
1849
1850                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1851                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1852                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1853                                               iterator.level - 1,
1854                                               1, ACC_ALL, iterator.sptep);
1855                         if (!sp) {
1856                                 pgprintk("nonpaging_map: ENOMEM\n");
1857                                 kvm_release_pfn_clean(pfn);
1858                                 return -ENOMEM;
1859                         }
1860
1861                         set_shadow_pte(iterator.sptep,
1862                                        __pa(sp->spt)
1863                                        | PT_PRESENT_MASK | PT_WRITABLE_MASK
1864                                        | shadow_user_mask | shadow_x_mask);
1865                 }
1866         }
1867         return pt_write;
1868 }
1869
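/*
 * Ordering note (a sketch of the intent): mmu_notifier_seq is sampled
 * and the pfn resolved before mmu_lock is taken, since gfn_to_pfn() may
 * sleep; mmu_notifier_retry() then discards the result if an
 * invalidation ran in between, so a stale pfn is never mapped.
 */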
1870 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1871 {
1872         int r;
1873         int largepage = 0;
1874         pfn_t pfn;
1875         unsigned long mmu_seq;
1876
1877         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1878                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1879                 largepage = 1;
1880         }
1881
1882         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1883         smp_rmb();
1884         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1885
1886         /* mmio */
1887         if (is_error_pfn(pfn)) {
1888                 kvm_release_pfn_clean(pfn);
1889                 return 1;
1890         }
1891
1892         spin_lock(&vcpu->kvm->mmu_lock);
1893         if (mmu_notifier_retry(vcpu, mmu_seq))
1894                 goto out_unlock;
1895         kvm_mmu_free_some_pages(vcpu);
1896         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1897         spin_unlock(&vcpu->kvm->mmu_lock);
1898
1899
1900         return r;
1901
1902 out_unlock:
1903         spin_unlock(&vcpu->kvm->mmu_lock);
1904         kvm_release_pfn_clean(pfn);
1905         return 0;
1906 }
1907
1908
1909 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1910 {
1911         int i;
1912         struct kvm_mmu_page *sp;
1913
1914         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1915                 return;
1916         spin_lock(&vcpu->kvm->mmu_lock);
1917         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1918                 hpa_t root = vcpu->arch.mmu.root_hpa;
1919
1920                 sp = page_header(root);
1921                 --sp->root_count;
1922                 if (!sp->root_count && sp->role.invalid)
1923                         kvm_mmu_zap_page(vcpu->kvm, sp);
1924                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1925                 spin_unlock(&vcpu->kvm->mmu_lock);
1926                 return;
1927         }
1928         for (i = 0; i < 4; ++i) {
1929                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1930
1931                 if (root) {
1932                         root &= PT64_BASE_ADDR_MASK;
1933                         sp = page_header(root);
1934                         --sp->root_count;
1935                         if (!sp->root_count && sp->role.invalid)
1936                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1937                 }
1938                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1939         }
1940         spin_unlock(&vcpu->kvm->mmu_lock);
1941         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1942 }
1943
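/*
 * Root allocation summary: in long mode a single 4-level root backs
 * root_hpa; otherwise four PAE roots are built, one per pdptr (or per
 * 1GB quadrant when the guest is not in PAE mode).  Roots are direct
 * when TDP is enabled or the guest has paging disabled.
 */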
1944 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1945 {
1946         int i;
1947         gfn_t root_gfn;
1948         struct kvm_mmu_page *sp;
1949         int direct = 0;
1950
1951         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1952
1953         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1954                 hpa_t root = vcpu->arch.mmu.root_hpa;
1955
1956                 ASSERT(!VALID_PAGE(root));
1957                 if (tdp_enabled)
1958                         direct = 1;
1959                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1960                                       PT64_ROOT_LEVEL, direct,
1961                                       ACC_ALL, NULL);
1962                 root = __pa(sp->spt);
1963                 ++sp->root_count;
1964                 vcpu->arch.mmu.root_hpa = root;
1965                 return;
1966         }
1967         direct = !is_paging(vcpu);
1968         if (tdp_enabled)
1969                 direct = 1;
1970         for (i = 0; i < 4; ++i) {
1971                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1972
1973                 ASSERT(!VALID_PAGE(root));
1974                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1975                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1976                                 vcpu->arch.mmu.pae_root[i] = 0;
1977                                 continue;
1978                         }
1979                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1980                 } else if (vcpu->arch.mmu.root_level == 0)
1981                         root_gfn = 0;
1982                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1983                                       PT32_ROOT_LEVEL, direct,
1984                                       ACC_ALL, NULL);
1985                 root = __pa(sp->spt);
1986                 ++sp->root_count;
1987                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1988         }
1989         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1990 }
1991
1992 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
1993 {
1994         int i;
1995         struct kvm_mmu_page *sp;
1996
1997         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1998                 return;
1999         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2000                 hpa_t root = vcpu->arch.mmu.root_hpa;
2001                 sp = page_header(root);
2002                 mmu_sync_children(vcpu, sp);
2003                 return;
2004         }
2005         for (i = 0; i < 4; ++i) {
2006                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2007
2008                 if (root) {
2009                         root &= PT64_BASE_ADDR_MASK;
2010                         sp = page_header(root);
2011                         mmu_sync_children(vcpu, sp);
2012                 }
2013         }
2014 }
2015
2016 static void mmu_sync_global(struct kvm_vcpu *vcpu)
2017 {
2018         struct kvm *kvm = vcpu->kvm;
2019         struct kvm_mmu_page *sp, *n;
2020
2021         list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
2022                 kvm_sync_page(vcpu, sp);
2023 }
2024
2025 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2026 {
2027         spin_lock(&vcpu->kvm->mmu_lock);
2028         mmu_sync_roots(vcpu);
2029         spin_unlock(&vcpu->kvm->mmu_lock);
2030 }
2031
2032 void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
2033 {
2034         spin_lock(&vcpu->kvm->mmu_lock);
2035         mmu_sync_global(vcpu);
2036         spin_unlock(&vcpu->kvm->mmu_lock);
2037 }
2038
2039 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2040 {
2041         return vaddr;
2042 }
2043
2044 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2045                                 u32 error_code)
2046 {
2047         gfn_t gfn;
2048         int r;
2049
2050         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2051         r = mmu_topup_memory_caches(vcpu);
2052         if (r)
2053                 return r;
2054
2055         ASSERT(vcpu);
2056         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2057
2058         gfn = gva >> PAGE_SHIFT;
2059
2060         return nonpaging_map(vcpu, gva & PAGE_MASK,
2061                              error_code & PFERR_WRITE_MASK, gfn);
2062 }
2063
2064 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2065                                 u32 error_code)
2066 {
2067         pfn_t pfn;
2068         int r;
2069         int largepage = 0;
2070         gfn_t gfn = gpa >> PAGE_SHIFT;
2071         unsigned long mmu_seq;
2072
2073         ASSERT(vcpu);
2074         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2075
2076         r = mmu_topup_memory_caches(vcpu);
2077         if (r)
2078                 return r;
2079
2080         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2081                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2082                 largepage = 1;
2083         }
2084         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2085         smp_rmb();
2086         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2087         if (is_error_pfn(pfn)) {
2088                 kvm_release_pfn_clean(pfn);
2089                 return 1;
2090         }
2091         spin_lock(&vcpu->kvm->mmu_lock);
2092         if (mmu_notifier_retry(vcpu, mmu_seq))
2093                 goto out_unlock;
2094         kvm_mmu_free_some_pages(vcpu);
2095         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2096                          largepage, gfn, pfn);
2097         spin_unlock(&vcpu->kvm->mmu_lock);
2098
2099         return r;
2100
2101 out_unlock:
2102         spin_unlock(&vcpu->kvm->mmu_lock);
2103         kvm_release_pfn_clean(pfn);
2104         return 0;
2105 }
2106
2107 static void nonpaging_free(struct kvm_vcpu *vcpu)
2108 {
2109         mmu_free_roots(vcpu);
2110 }
2111
2112 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2113 {
2114         struct kvm_mmu *context = &vcpu->arch.mmu;
2115
2116         context->new_cr3 = nonpaging_new_cr3;
2117         context->page_fault = nonpaging_page_fault;
2118         context->gva_to_gpa = nonpaging_gva_to_gpa;
2119         context->free = nonpaging_free;
2120         context->prefetch_page = nonpaging_prefetch_page;
2121         context->sync_page = nonpaging_sync_page;
2122         context->invlpg = nonpaging_invlpg;
2123         context->root_level = 0;
2124         context->shadow_root_level = PT32E_ROOT_LEVEL;
2125         context->root_hpa = INVALID_PAGE;
2126         return 0;
2127 }
2128
2129 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2130 {
2131         ++vcpu->stat.tlb_flush;
2132         kvm_x86_ops->tlb_flush(vcpu);
2133 }
2134
2135 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2136 {
2137         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2138         mmu_free_roots(vcpu);
2139 }
2140
2141 static void inject_page_fault(struct kvm_vcpu *vcpu,
2142                               u64 addr,
2143                               u32 err_code)
2144 {
2145         kvm_inject_page_fault(vcpu, addr, err_code);
2146 }
2147
2148 static void paging_free(struct kvm_vcpu *vcpu)
2149 {
2150         nonpaging_free(vcpu);
2151 }
2152
2153 #define PTTYPE 64
2154 #include "paging_tmpl.h"
2155 #undef PTTYPE
2156
2157 #define PTTYPE 32
2158 #include "paging_tmpl.h"
2159 #undef PTTYPE
2160
2161 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2162 {
2163         struct kvm_mmu *context = &vcpu->arch.mmu;
2164
2165         ASSERT(is_pae(vcpu));
2166         context->new_cr3 = paging_new_cr3;
2167         context->page_fault = paging64_page_fault;
2168         context->gva_to_gpa = paging64_gva_to_gpa;
2169         context->prefetch_page = paging64_prefetch_page;
2170         context->sync_page = paging64_sync_page;
2171         context->invlpg = paging64_invlpg;
2172         context->free = paging_free;
2173         context->root_level = level;
2174         context->shadow_root_level = level;
2175         context->root_hpa = INVALID_PAGE;
2176         return 0;
2177 }
2178
2179 static int paging64_init_context(struct kvm_vcpu *vcpu)
2180 {
2181         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2182 }
2183
2184 static int paging32_init_context(struct kvm_vcpu *vcpu)
2185 {
2186         struct kvm_mmu *context = &vcpu->arch.mmu;
2187
2188         context->new_cr3 = paging_new_cr3;
2189         context->page_fault = paging32_page_fault;
2190         context->gva_to_gpa = paging32_gva_to_gpa;
2191         context->free = paging_free;
2192         context->prefetch_page = paging32_prefetch_page;
2193         context->sync_page = paging32_sync_page;
2194         context->invlpg = paging32_invlpg;
2195         context->root_level = PT32_ROOT_LEVEL;
2196         context->shadow_root_level = PT32E_ROOT_LEVEL;
2197         context->root_hpa = INVALID_PAGE;
2198         return 0;
2199 }
2200
2201 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2202 {
2203         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2204 }
2205
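/*
 * TDP note: with two-dimensional paging the fault handler works on
 * guest physical addresses directly, but gva_to_gpa must still follow
 * the guest's current paging mode, so it is selected per mode below.
 */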
2206 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2207 {
2208         struct kvm_mmu *context = &vcpu->arch.mmu;
2209
2210         context->new_cr3 = nonpaging_new_cr3;
2211         context->page_fault = tdp_page_fault;
2212         context->free = nonpaging_free;
2213         context->prefetch_page = nonpaging_prefetch_page;
2214         context->sync_page = nonpaging_sync_page;
2215         context->invlpg = nonpaging_invlpg;
2216         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2217         context->root_hpa = INVALID_PAGE;
2218
2219         if (!is_paging(vcpu)) {
2220                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2221                 context->root_level = 0;
2222         } else if (is_long_mode(vcpu)) {
2223                 context->gva_to_gpa = paging64_gva_to_gpa;
2224                 context->root_level = PT64_ROOT_LEVEL;
2225         } else if (is_pae(vcpu)) {
2226                 context->gva_to_gpa = paging64_gva_to_gpa;
2227                 context->root_level = PT32E_ROOT_LEVEL;
2228         } else {
2229                 context->gva_to_gpa = paging32_gva_to_gpa;
2230                 context->root_level = PT32_ROOT_LEVEL;
2231         }
2232
2233         return 0;
2234 }
2235
2236 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2237 {
2238         int r;
2239
2240         ASSERT(vcpu);
2241         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2242
2243         if (!is_paging(vcpu))
2244                 r = nonpaging_init_context(vcpu);
2245         else if (is_long_mode(vcpu))
2246                 r = paging64_init_context(vcpu);
2247         else if (is_pae(vcpu))
2248                 r = paging32E_init_context(vcpu);
2249         else
2250                 r = paging32_init_context(vcpu);
2251
2252         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2253
2254         return r;
2255 }
2256
2257 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2258 {
2259         vcpu->arch.update_pte.pfn = bad_pfn;
2260
2261         if (tdp_enabled)
2262                 return init_kvm_tdp_mmu(vcpu);
2263         else
2264                 return init_kvm_softmmu(vcpu);
2265 }
2266
2267 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2268 {
2269         ASSERT(vcpu);
2270         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2271                 vcpu->arch.mmu.free(vcpu);
2272                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2273         }
2274 }
2275
2276 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2277 {
2278         destroy_kvm_mmu(vcpu);
2279         return init_kvm_mmu(vcpu);
2280 }
2281 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2282
2283 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2284 {
2285         int r;
2286
2287         r = mmu_topup_memory_caches(vcpu);
2288         if (r)
2289                 goto out;
2290         spin_lock(&vcpu->kvm->mmu_lock);
2291         kvm_mmu_free_some_pages(vcpu);
2292         mmu_alloc_roots(vcpu);
2293         mmu_sync_roots(vcpu);
2294         spin_unlock(&vcpu->kvm->mmu_lock);
2295         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2296         kvm_mmu_flush_tlb(vcpu);
2297 out:
2298         return r;
2299 }
2300 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2301
2302 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2303 {
2304         mmu_free_roots(vcpu);
2305 }
2306
2307 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2308                                   struct kvm_mmu_page *sp,
2309                                   u64 *spte)
2310 {
2311         u64 pte;
2312         struct kvm_mmu_page *child;
2313
2314         pte = *spte;
2315         if (is_shadow_present_pte(pte)) {
2316                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
2317                     is_large_pte(pte))
2318                         rmap_remove(vcpu->kvm, spte);
2319                 else {
2320                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2321                         mmu_page_remove_parent_pte(child, spte);
2322                 }
2323         }
2324         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
2325         if (is_large_pte(pte))
2326                 --vcpu->kvm->stat.lpages;
2327 }
2328
2329 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2330                                   struct kvm_mmu_page *sp,
2331                                   u64 *spte,
2332                                   const void *new)
2333 {
2334         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2335                 if (!vcpu->arch.update_pte.largepage ||
2336                     sp->role.glevels == PT32_ROOT_LEVEL) {
2337                         ++vcpu->kvm->stat.mmu_pde_zapped;
2338                         return;
2339                 }
2340         }
2341
2342         ++vcpu->kvm->stat.mmu_pte_updated;
2343         if (sp->role.glevels == PT32_ROOT_LEVEL)
2344                 paging32_update_pte(vcpu, sp, spte, new);
2345         else
2346                 paging64_update_pte(vcpu, sp, spte, new);
2347 }
2348
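/*
 * Remote-flush rule: other vcpus need a flush only when a previously
 * present spte changed its target frame or lost a permission.  NX is
 * inverted below so that setting NX counts as removing a permission in
 * the same "old has it, new does not" test.
 */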
2349 static bool need_remote_flush(u64 old, u64 new)
2350 {
2351         if (!is_shadow_present_pte(old))
2352                 return false;
2353         if (!is_shadow_present_pte(new))
2354                 return true;
2355         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2356                 return true;
2357         old ^= PT64_NX_MASK;
2358         new ^= PT64_NX_MASK;
2359         return (old & ~new & PT64_PERM_MASK) != 0;
2360 }
2361
2362 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2363 {
2364         if (need_remote_flush(old, new))
2365                 kvm_flush_remote_tlbs(vcpu->kvm);
2366         else
2367                 kvm_mmu_flush_tlb(vcpu);
2368 }
2369
2370 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2371 {
2372         u64 *spte = vcpu->arch.last_pte_updated;
2373
2374         return !!(spte && (*spte & shadow_accessed_mask));
2375 }
2376
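/*
 * Prefetch note (sketch): decode the guest pte being written and
 * resolve its pfn before mmu_lock is taken, since gfn_to_pfn() may
 * sleep; kvm_mmu_pte_write() then updates the shadow pte under the lock
 * using the cached update_pte.{gfn,pfn}.
 */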
2377 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2378                                           const u8 *new, int bytes)
2379 {
2380         gfn_t gfn;
2381         int r;
2382         u64 gpte = 0;
2383         pfn_t pfn;
2384
2385         vcpu->arch.update_pte.largepage = 0;
2386
2387         if (bytes != 4 && bytes != 8)
2388                 return;
2389
2390         /*
2391          * Assume that the pte write is on a page table of the same type
2392          * as the current vcpu paging mode.  This is nearly always true
2393          * (might be false while changing modes).  Note it is verified later
2394          * by update_pte().
2395          */
2396         if (is_pae(vcpu)) {
2397                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2398                 if ((bytes == 4) && (gpa % 4 == 0)) {
2399                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2400                         if (r)
2401                                 return;
2402                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2403                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2404                         memcpy((void *)&gpte, new, 8);
2405                 }
2406         } else {
2407                 if ((bytes == 4) && (gpa % 4 == 0))
2408                         memcpy((void *)&gpte, new, 4);
2409         }
2410         if (!is_present_pte(gpte))
2411                 return;
2412         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2413
2414         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2415                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2416                 vcpu->arch.update_pte.largepage = 1;
2417         }
2418         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2419         smp_rmb();
2420         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2421
2422         if (is_error_pfn(pfn)) {
2423                 kvm_release_pfn_clean(pfn);
2424                 return;
2425         }
2426         vcpu->arch.update_pte.gfn = gfn;
2427         vcpu->arch.update_pte.pfn = pfn;
2428 }
2429
2430 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2431 {
2432         u64 *spte = vcpu->arch.last_pte_updated;
2433
2434         if (spte
2435             && vcpu->arch.last_pte_gfn == gfn
2436             && shadow_accessed_mask
2437             && !(*spte & shadow_accessed_mask)
2438             && is_shadow_present_pte(*spte))
2439                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2440 }
2441
2442 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2443                        const u8 *new, int bytes,
2444                        bool guest_initiated)
2445 {
2446         gfn_t gfn = gpa >> PAGE_SHIFT;
2447         struct kvm_mmu_page *sp;
2448         struct hlist_node *node, *n;
2449         struct hlist_head *bucket;
2450         unsigned index;
2451         u64 entry, gentry;
2452         u64 *spte;
2453         unsigned offset = offset_in_page(gpa);
2454         unsigned pte_size;
2455         unsigned page_offset;
2456         unsigned misaligned;
2457         unsigned quadrant;
2458         int level;
2459         int flooded = 0;
2460         int npte;
2461         int r;
2462
2463         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2464         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2465         spin_lock(&vcpu->kvm->mmu_lock);
2466         kvm_mmu_access_page(vcpu, gfn);
2467         kvm_mmu_free_some_pages(vcpu);
2468         ++vcpu->kvm->stat.mmu_pte_write;
2469         kvm_mmu_audit(vcpu, "pre pte write");
2470         if (guest_initiated) {
2471                 if (gfn == vcpu->arch.last_pt_write_gfn
2472                     && !last_updated_pte_accessed(vcpu)) {
2473                         ++vcpu->arch.last_pt_write_count;
2474                         if (vcpu->arch.last_pt_write_count >= 3)
2475                                 flooded = 1;
2476                 } else {
2477                         vcpu->arch.last_pt_write_gfn = gfn;
2478                         vcpu->arch.last_pt_write_count = 1;
2479                         vcpu->arch.last_pte_updated = NULL;
2480                 }
2481         }
2482         index = kvm_page_table_hashfn(gfn);
2483         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2484         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2485                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2486                         continue;
2487                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2488                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2489                 misaligned |= bytes < 4;
2490                 if (misaligned || flooded) {
2491                         /*
2492                          * Misaligned accesses are too much trouble to fix
2493                          * up; also, they usually indicate a page is not used
2494                          * as a page table.
2495                          *
2496                          * If we're seeing too many writes to a page,
2497                          * it may no longer be a page table, or we may be
2498                          * forking, in which case it is better to unmap the
2499                          * page.
2500                          */
2501                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2502                                  gpa, bytes, sp->role.word);
2503                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2504                                 n = bucket->first;
2505                         ++vcpu->kvm->stat.mmu_flooded;
2506                         continue;
2507                 }
2508                 page_offset = offset;
2509                 level = sp->role.level;
2510                 npte = 1;
2511                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2512                         page_offset <<= 1;      /* 32->64 */
2513                         /*
2514                          * A 32-bit pde maps 4MB while the shadow pdes map
2515                          * only 2MB.  So we need to double the offset again
2516                          * and zap two pdes instead of one.
2517                          */
2518                         if (level == PT32_ROOT_LEVEL) {
2519                                 page_offset &= ~7; /* kill rounding error */
2520                                 page_offset <<= 1;
2521                                 npte = 2;
2522                         }
2523                         quadrant = page_offset >> PAGE_SHIFT;
2524                         page_offset &= ~PAGE_MASK;
2525                         if (quadrant != sp->role.quadrant)
2526                                 continue;
2527                 }
2528                 spte = &sp->spt[page_offset / sizeof(*spte)];
2529                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2530                         gentry = 0;
2531                         r = kvm_read_guest_atomic(vcpu->kvm,
2532                                                   gpa & ~(u64)(pte_size - 1),
2533                                                   &gentry, pte_size);
2534                         new = (const void *)&gentry;
2535                         if (r < 0)
2536                                 new = NULL;
2537                 }
2538                 while (npte--) {
2539                         entry = *spte;
2540                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2541                         if (new)
2542                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2543                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2544                         ++spte;
2545                 }
2546         }
2547         kvm_mmu_audit(vcpu, "post pte write");
2548         spin_unlock(&vcpu->kvm->mmu_lock);
2549         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2550                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2551                 vcpu->arch.update_pte.pfn = bad_pfn;
2552         }
2553 }
2554
2555 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2556 {
2557         gpa_t gpa;
2558         int r;
2559
2560         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2561
2562         spin_lock(&vcpu->kvm->mmu_lock);
2563         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2564         spin_unlock(&vcpu->kvm->mmu_lock);
2565         return r;
2566 }
2567 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2568
2569 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2570 {
2571         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2572                 struct kvm_mmu_page *sp;
2573
2574                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2575                                   struct kvm_mmu_page, link);
2576                 kvm_mmu_zap_page(vcpu->kvm, sp);
2577                 ++vcpu->kvm->stat.mmu_recycled;
2578         }
2579 }
2580
2581 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2582 {
2583         int r;
2584         enum emulation_result er;
2585
2586         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2587         if (r < 0)
2588                 goto out;
2589
2590         if (!r) {
2591                 r = 1;
2592                 goto out;
2593         }
2594
2595         r = mmu_topup_memory_caches(vcpu);
2596         if (r)
2597                 goto out;
2598
2599         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2600
2601         switch (er) {
2602         case EMULATE_DONE:
2603                 return 1;
2604         case EMULATE_DO_MMIO:
2605                 ++vcpu->stat.mmio_exits;
2606                 return 0;
2607         case EMULATE_FAIL:
2608                 kvm_report_emulation_failure(vcpu, "pagetable");
2609                 return 1;
2610         default:
2611                 BUG();
2612         }
2613 out:
2614         return r;
2615 }
2616 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2617
2618 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2619 {
2620         vcpu->arch.mmu.invlpg(vcpu, gva);
2621         kvm_mmu_flush_tlb(vcpu);
2622         ++vcpu->stat.invlpg;
2623 }
2624 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2625
2626 void kvm_enable_tdp(void)
2627 {
2628         tdp_enabled = true;
2629 }
2630 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2631
2632 void kvm_disable_tdp(void)
2633 {
2634         tdp_enabled = false;
2635 }
2636 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2637
2638 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2639 {
2640         struct kvm_mmu_page *sp;
2641
2642         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2643                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
2644                                   struct kvm_mmu_page, link);
2645                 kvm_mmu_zap_page(vcpu->kvm, sp);
2646                 cond_resched();
2647         }
2648         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2649 }
2650
2651 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2652 {
2653         struct page *page;
2654         int i;
2655
2656         ASSERT(vcpu);
2657
2658         if (vcpu->kvm->arch.n_requested_mmu_pages)
2659                 vcpu->kvm->arch.n_free_mmu_pages =
2660                                         vcpu->kvm->arch.n_requested_mmu_pages;
2661         else
2662                 vcpu->kvm->arch.n_free_mmu_pages =
2663                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2664         /*
2665          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2666          * Therefore we need to allocate shadow page tables in the first
2667          * 4GB of memory, which happens to fit the DMA32 zone.
2668          */
2669         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2670         if (!page)
2671                 goto error_1;
2672         vcpu->arch.mmu.pae_root = page_address(page);
2673         for (i = 0; i < 4; ++i)
2674                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2675
2676         return 0;
2677
2678 error_1:
2679         free_mmu_pages(vcpu);
2680         return -ENOMEM;
2681 }
2682
2683 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2684 {
2685         ASSERT(vcpu);
2686         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2687
2688         return alloc_mmu_pages(vcpu);
2689 }
2690
2691 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2692 {
2693         ASSERT(vcpu);
2694         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2695
2696         return init_kvm_mmu(vcpu);
2697 }
2698
2699 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2700 {
2701         ASSERT(vcpu);
2702
2703         destroy_kvm_mmu(vcpu);
2704         free_mmu_pages(vcpu);
2705         mmu_free_memory_caches(vcpu);
2706 }
2707
2708 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2709 {
2710         struct kvm_mmu_page *sp;
2711
2712         spin_lock(&kvm->mmu_lock);
2713         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2714                 int i;
2715                 u64 *pt;
2716
2717                 if (!test_bit(slot, sp->slot_bitmap))
2718                         continue;
2719
2720                 pt = sp->spt;
2721                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2722                         /* avoid RMW */
2723                         if (pt[i] & PT_WRITABLE_MASK)
2724                                 pt[i] &= ~PT_WRITABLE_MASK;
2725         }
2726         kvm_flush_remote_tlbs(kvm);
2727         spin_unlock(&kvm->mmu_lock);
2728 }
2729
2730 void kvm_mmu_zap_all(struct kvm *kvm)
2731 {
2732         struct kvm_mmu_page *sp, *node;
2733
2734         spin_lock(&kvm->mmu_lock);
2735         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2736                 if (kvm_mmu_zap_page(kvm, sp))
2737                         node = container_of(kvm->arch.active_mmu_pages.next,
2738                                             struct kvm_mmu_page, link);
2739         spin_unlock(&kvm->mmu_lock);
2740
2741         kvm_flush_remote_tlbs(kvm);
2742 }
2743
2744 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2745 {
2746         struct kvm_mmu_page *page;
2747
2748         page = container_of(kvm->arch.active_mmu_pages.prev,
2749                             struct kvm_mmu_page, link);
2750         kvm_mmu_zap_page(kvm, page);
2751 }
2752
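/*
 * Shrinker sketch: report the total number of shadow pages in use
 * across all VMs and, when scanning, zap one page from the first VM
 * whose locks could be taken, moving that VM to the tail of vm_list so
 * successive shrinks hit different VMs.
 */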
2753 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2754 {
2755         struct kvm *kvm;
2756         struct kvm *kvm_freed = NULL;
2757         int cache_count = 0;
2758
2759         spin_lock(&kvm_lock);
2760
2761         list_for_each_entry(kvm, &vm_list, vm_list) {
2762                 int npages;
2763
2764                 if (!down_read_trylock(&kvm->slots_lock))
2765                         continue;
2766                 spin_lock(&kvm->mmu_lock);
2767                 npages = kvm->arch.n_alloc_mmu_pages -
2768                          kvm->arch.n_free_mmu_pages;
2769                 cache_count += npages;
2770                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2771                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2772                         cache_count--;
2773                         kvm_freed = kvm;
2774                 }
2775                 nr_to_scan--;
2776
2777                 spin_unlock(&kvm->mmu_lock);
2778                 up_read(&kvm->slots_lock);
2779         }
2780         if (kvm_freed)
2781                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2782
2783         spin_unlock(&kvm_lock);
2784
2785         return cache_count;
2786 }
2787
2788 static struct shrinker mmu_shrinker = {
2789         .shrink = mmu_shrink,
2790         .seeks = DEFAULT_SEEKS * 10,
2791 };
2792
2793 static void mmu_destroy_caches(void)
2794 {
2795         if (pte_chain_cache)
2796                 kmem_cache_destroy(pte_chain_cache);
2797         if (rmap_desc_cache)
2798                 kmem_cache_destroy(rmap_desc_cache);
2799         if (mmu_page_header_cache)
2800                 kmem_cache_destroy(mmu_page_header_cache);
2801 }
2802
2803 void kvm_mmu_module_exit(void)
2804 {
2805         mmu_destroy_caches();
2806         unregister_shrinker(&mmu_shrinker);
2807 }
2808
2809 int kvm_mmu_module_init(void)
2810 {
2811         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2812                                             sizeof(struct kvm_pte_chain),
2813                                             0, 0, NULL);
2814         if (!pte_chain_cache)
2815                 goto nomem;
2816         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2817                                             sizeof(struct kvm_rmap_desc),
2818                                             0, 0, NULL);
2819         if (!rmap_desc_cache)
2820                 goto nomem;
2821
2822         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2823                                                   sizeof(struct kvm_mmu_page),
2824                                                   0, 0, NULL);
2825         if (!mmu_page_header_cache)
2826                 goto nomem;
2827
2828         register_shrinker(&mmu_shrinker);
2829
2830         return 0;
2831
2832 nomem:
2833         mmu_destroy_caches();
2834         return -ENOMEM;
2835 }
2836
2837 /*
2838  * Calculate mmu pages needed for kvm.
2839  */
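/*
 * Example (assuming KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64): a guest with 1,048,576 memslot pages
 * (4GB of RAM) gets 1048576 * 20 / 1000 = 20971 shadow pages, while a
 * very small guest is clamped up to the 64-page minimum.
 */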
2840 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2841 {
2842         int i;
2843         unsigned int nr_mmu_pages;
2844         unsigned int  nr_pages = 0;
2845
2846         for (i = 0; i < kvm->nmemslots; i++)
2847                 nr_pages += kvm->memslots[i].npages;
2848
2849         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2850         nr_mmu_pages = max(nr_mmu_pages,
2851                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2852
2853         return nr_mmu_pages;
2854 }
2855
2856 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2857                                 unsigned len)
2858 {
2859         if (len > buffer->len)
2860                 return NULL;
2861         return buffer->ptr;
2862 }
2863
2864 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2865                                 unsigned len)
2866 {
2867         void *ret;
2868
2869         ret = pv_mmu_peek_buffer(buffer, len);
2870         if (!ret)
2871                 return ret;
2872         buffer->ptr += len;
2873         buffer->len -= len;
2874         buffer->processed += len;
2875         return ret;
2876 }
2877
2878 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2879                              gpa_t addr, gpa_t value)
2880 {
2881         int bytes = 8;
2882         int r;
2883
2884         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2885                 bytes = 4;
2886
2887         r = mmu_topup_memory_caches(vcpu);
2888         if (r)
2889                 return r;
2890
2891         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2892                 return -EFAULT;
2893
2894         return 1;
2895 }
2896
2897 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2898 {
2899         kvm_x86_ops->tlb_flush(vcpu);
2900         set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
2901         return 1;
2902 }
2903
2904 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2905 {
2906         spin_lock(&vcpu->kvm->mmu_lock);
2907         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2908         spin_unlock(&vcpu->kvm->mmu_lock);
2909         return 1;
2910 }
2911
2912 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2913                              struct kvm_pv_mmu_op_buffer *buffer)
2914 {
2915         struct kvm_mmu_op_header *header;
2916
2917         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2918         if (!header)
2919                 return 0;
2920         switch (header->op) {
2921         case KVM_MMU_OP_WRITE_PTE: {
2922                 struct kvm_mmu_op_write_pte *wpte;
2923
2924                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2925                 if (!wpte)
2926                         return 0;
2927                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2928                                         wpte->pte_val);
2929         }
2930         case KVM_MMU_OP_FLUSH_TLB: {
2931                 struct kvm_mmu_op_flush_tlb *ftlb;
2932
2933                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2934                 if (!ftlb)
2935                         return 0;
2936                 return kvm_pv_mmu_flush_tlb(vcpu);
2937         }
2938         case KVM_MMU_OP_RELEASE_PT: {
2939                 struct kvm_mmu_op_release_pt *rpt;
2940
2941                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2942                 if (!rpt)
2943                         return 0;
2944                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2945         }
2946         default: return 0;
2947         }
2948 }
2949
2950 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2951                   gpa_t addr, unsigned long *ret)
2952 {
2953         int r;
2954         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2955
2956         buffer->ptr = buffer->buf;
2957         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2958         buffer->processed = 0;
2959
2960         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2961         if (r)
2962                 goto out;
2963
2964         while (buffer->len) {
2965                 r = kvm_pv_mmu_op_one(vcpu, buffer);
2966                 if (r < 0)
2967                         goto out;
2968                 if (r == 0)
2969                         break;
2970         }
2971
2972         r = 1;
2973 out:
2974         *ret = buffer->processed;
2975         return r;
2976 }
2977
2978 #ifdef AUDIT
2979
2980 static const char *audit_msg;
2981
2982 static gva_t canonicalize(gva_t gva)
2983 {
2984 #ifdef CONFIG_X86_64
2985         gva = (long long)(gva << 16) >> 16;
2986 #endif
2987         return gva;
2988 }
2989
2990 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2991                                 gva_t va, int level)
2992 {
2993         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2994         int i;
2995         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2996
2997         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2998                 u64 ent = pt[i];
2999
3000                 if (ent == shadow_trap_nonpresent_pte)
3001                         continue;
3002
3003                 va = canonicalize(va);
3004                 if (level > 1) {
3005                         if (ent == shadow_notrap_nonpresent_pte)
3006                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
3007                                        " in nonleaf level: levels %d gva %lx"
3008                                        " level %d pte %llx\n", audit_msg,
3009                                        vcpu->arch.mmu.root_level, va, level, ent);
3010
3011                         audit_mappings_page(vcpu, ent, va, level - 1);
3012                 } else {
3013                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3014                         pfn_t pfn = gpa_to_pfn(vcpu, gpa);
3015                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3016                         if (is_shadow_present_pte(ent)
3017                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3018                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
3019                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3020                                        audit_msg, vcpu->arch.mmu.root_level,
3021                                        va, gpa, hpa, ent,
3022                                        is_shadow_present_pte(ent));
3023                         else if (ent == shadow_notrap_nonpresent_pte
3024                                  && !is_error_hpa(hpa))
3025                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3026                                        " valid guest gva %lx\n", audit_msg, va);
3027                         kvm_release_pfn_clean(pfn);
3028
3029                 }
3030         }
3031 }
3032
3033 static void audit_mappings(struct kvm_vcpu *vcpu)
3034 {
3035         unsigned i;
3036
3037         if (vcpu->arch.mmu.root_level == 4)
3038                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3039         else
3040                 for (i = 0; i < 4; ++i)
3041                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3042                                 audit_mappings_page(vcpu,
3043                                                     vcpu->arch.mmu.pae_root[i],
3044                                                     i << 30,
3045                                                     2);
3046 }
3047
3048 static int count_rmaps(struct kvm_vcpu *vcpu)
3049 {
3050         int nmaps = 0;
3051         int i, j, k;
3052
3053         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3054                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3055                 struct kvm_rmap_desc *d;
3056
3057                 for (j = 0; j < m->npages; ++j) {
3058                         unsigned long *rmapp = &m->rmap[j];
3059
3060                         if (!*rmapp)
3061                                 continue;
3062                         if (!(*rmapp & 1)) {
3063                                 ++nmaps;
3064                                 continue;
3065                         }
3066                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3067                         while (d) {
3068                                 for (k = 0; k < RMAP_EXT; ++k)
3069                                         if (d->shadow_ptes[k])
3070                                                 ++nmaps;
3071                                         else
3072                                                 break;
3073                                 d = d->more;
3074                         }
3075                 }
3076         }
3077         return nmaps;
3078 }
3079
3080 static int count_writable_mappings(struct kvm_vcpu *vcpu)
3081 {
3082         int nmaps = 0;
3083         struct kvm_mmu_page *sp;
3084         int i;
3085
3086         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3087                 u64 *pt = sp->spt;
3088
3089                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3090                         continue;
3091
3092                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3093                         u64 ent = pt[i];
3094
3095                         if (!(ent & PT_PRESENT_MASK))
3096                                 continue;
3097                         if (!(ent & PT_WRITABLE_MASK))
3098                                 continue;
3099                         ++nmaps;
3100                 }
3101         }
3102         return nmaps;
3103 }
3104
3105 static void audit_rmap(struct kvm_vcpu *vcpu)
3106 {
3107         int n_rmap = count_rmaps(vcpu);
3108         int n_actual = count_writable_mappings(vcpu);
3109
3110         if (n_rmap != n_actual)
3111                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
3112                        __func__, audit_msg, n_rmap, n_actual);
3113 }
3114
3115 static void audit_write_protection(struct kvm_vcpu *vcpu)
3116 {
3117         struct kvm_mmu_page *sp;
3118         struct kvm_memory_slot *slot;
3119         unsigned long *rmapp;
3120         gfn_t gfn;
3121
3122         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3123                 if (sp->role.direct)
3124                         continue;
3125
3126                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3127                 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3128                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3129                 if (*rmapp)
3130                         printk(KERN_ERR "%s: (%s) shadow page has writable"
3131                                " mappings: gfn %lx role %x\n",
3132                                __func__, audit_msg, sp->gfn,
3133                                sp->role.word);
3134         }
3135 }
3136
3137 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3138 {
3139         int olddbg = dbg;
3140
3141         dbg = 0;
3142         audit_msg = msg;
3143         audit_rmap(vcpu);
3144         audit_write_protection(vcpu);
3145         audit_mappings(vcpu);
3146         dbg = olddbg;
3147 }
3148
3149 #endif