/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
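/*
 * For orientation: a sketch of how this template is consumed.  The exact
 * include sequence lives in the including file (mmu.c), not here; it is
 * assumed to look roughly like:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each pass instantiates a complete walker (paging64_* or paging32_*) for
 * the corresponding guest pte size.
 */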
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define shadow_walker shadow_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define shadow_walker shadow_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

struct shadow_walker {
	struct kvm_shadow_walk walker;
	struct guest_walker *guest_walker;
	int user_fault;
	int write_fault;
	int largepage;
	int *ptwrite;
	pfn_t pfn;
	u64 *sptep;
};
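/*
 * Layout sketch (explanatory note, not from the original file): the
 * guest_walker arrays are indexed by (level - 1), so for a 32-bit two-level
 * walk:
 *
 *	table_gfn[1] = frame of the page directory, ptes[1] = the pde,
 *	               pte_gpa[1] = guest-physical address of that pde
 *	table_gfn[0] = frame of the page table, ptes[0] = the leaf pte,
 *	               pte_gpa[0] = guest-physical address of that pte
 *
 * shadow_walker simply bundles a guest_walker with the fault parameters so
 * the shadow-walk callback below can see both while walk_shadow() iterates.
 */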
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(kvm, table_gfn);
	up_read(&current->mm->mmap_sem);

	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;
#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
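/*
 * Worked example (explanatory note, not from the original file): for a
 * PTTYPE == 32 guest with paging enabled, a fault at addr 0x08049123 walks
 * two levels:
 *
 *	level 2: index = (addr >> 22) & 0x3ff  -> pde stored in ptes[1]
 *	level 1: index = (addr >> 12) & 0x3ff  -> pte stored in ptes[0]
 *
 * On success walker->gfn holds the guest frame backing 0x08049000 and
 * pte_access is the AND of the permissions collected on the way down.  If
 * the guest races us while we set the accessed or dirty bit, cmpxchg_gpte()
 * reports the mismatch and the walk restarts from the "walk:" label.
 */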
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
		     pfn, true);
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
				    struct kvm_vcpu *vcpu, u64 addr,
				    u64 *sptep, int level)
{
	struct shadow_walker *sw =
		container_of(_sw, struct shadow_walker, walker);
	struct guest_walker *gw = sw->guest_walker;
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte;
	int metaphysical;
	gfn_t table_gfn;
	int r;
	pt_element_t curr_pte;

	if (level == PT_PAGE_TABLE_LEVEL
	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
			     sw->user_fault, sw->write_fault,
			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
			     sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
			     false);
		sw->sptep = sptep;
		return 1;
	}

	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
		return 0;

	if (is_large_pte(*sptep))
		rmap_remove(vcpu->kvm, sptep);

	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
		metaphysical = 1;
		if (!is_dirty_pte(gw->ptes[level - 1]))
			access &= ~ACC_WRITE_MASK;
		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
	} else {
		metaphysical = 0;
		table_gfn = gw->table_gfn[level - 2];
	}
	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
				       metaphysical, access, sptep);
	if (!metaphysical) {
		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
					  &curr_pte, sizeof(curr_pte));
		if (r || curr_pte != gw->ptes[level - 2]) {
			kvm_release_pfn_clean(sw->pfn);
			sw->sptep = NULL;
			return 1;
		}
	}

	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
	*sptep = spte;
	return 0;
}
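/*
 * Note on the callback contract, as used by this file: walk_shadow() calls
 * this entry hook once per shadow level; returning 1 stops the walk (the
 * leaf spte was installed, or the guest pte changed underneath us and the
 * fault path must bail out), while returning 0 lets the walk descend into
 * the shadow page linked in above.
 */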
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *guest_walker,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	struct shadow_walker walker = {
		.walker = { .entry = FNAME(shadow_walk_entry), },
		.guest_walker = guest_walker,
		.user_fault = user_fault,
		.write_fault = write_fault,
		.largepage = largepage,
		.ptwrite = ptwrite,
		.pfn = pfn,
	};

	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
		return NULL;

	walk_shadow(&walker.walker, vcpu, addr);

	return walker.sptep;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
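/*
 * Usage note (describes the caller side, which lives in mmu.c rather than
 * in this file): the generic fault path is expected to emulate the faulting
 * instruction when this handler returns 1 (mmio, or a write that hit a
 * shadowed guest page table), and to simply resume the guest when it
 * returns 0, since the shadow pte has been fixed up.
 */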
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
#undef pt_element_t
#undef guest_walker
#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG