From 1cbea809c400661eecb538e0dd0bc4f3660f0a35 Mon Sep 17 00:00:00 2001
From: Xiantao Zhang
Date: Fri, 3 Oct 2008 14:58:09 +0800
Subject: [PATCH] KVM: ia64: Make pmt table be able to hold physical mmio entries.

Don't try to do put_page() once the entries are MMIO. Set the tag to
indicate MMIO space so the vmm can set the TLB's memory attribute.

Signed-off-by: Xiantao Zhang
Signed-off-by: Avi Kivity
---
 arch/ia64/kvm/kvm-ia64.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index a6cf719811b..800a4f2e917 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1437,17 +1437,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		int user_alloc)
 {
 	unsigned long i;
-	struct page *page;
+	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
 	for (i = 0; i < npages; i++) {
-		page = gfn_to_page(kvm, base_gfn + i);
-		kvm_set_pmt_entry(kvm, base_gfn + i,
-				page_to_pfn(page) << PAGE_SHIFT,
-			_PAGE_AR_RWX|_PAGE_MA_WB);
-		memslot->rmap[i] = (unsigned long)page;
+		pfn = gfn_to_pfn(kvm, base_gfn + i);
+		if (!kvm_is_mmio_pfn(pfn)) {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					pfn << PAGE_SHIFT,
+				_PAGE_AR_RWX | _PAGE_MA_WB);
+			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+		} else {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+					_PAGE_MA_UC);
+			memslot->rmap[i] = 0;
+		}
 	}
 
 	return 0;
-- 
2.41.0
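
For readers unfamiliar with the pmt encoding this patch relies on: the MMIO
tag is OR'd into the entry above the physical-address bits, so one unsigned
long carries both the host pfn and the "this is MMIO" marker the vmm later
uses to choose an uncacheable (UC) rather than write-back (WB) mapping, and
rmap is left 0 so no put_page() is ever attempted on device memory. Below is
a minimal user-space sketch of that encoding only; the tag position (bits
63:60), the EX_GPFN_* values, and decode_pmt_entry() are illustrative
assumptions, not the actual kvm/vmm definitions.

	#include <stdio.h>

	#define EX_PAGE_SHIFT	14		/* 16K pages, a common ia64 configuration (assumed) */
	#define EX_GPFN_MEM	(0UL << 60)	/* normal memory: no tag bits set (assumed layout) */
	#define EX_GPFN_MMIO	(7UL << 60)	/* MMIO tag, stand-in for GPFN_PHYS_MMIO (assumed value) */
	#define EX_GPFN_MASK	(0xfUL << 60)	/* tag field mask (assumed layout) */

	/* Hypothetical helper: split a tagged entry back into tag and address,
	 * the way the vmm side would when building the guest TLB entry. */
	static void decode_pmt_entry(unsigned long entry)
	{
		unsigned long tag  = entry & EX_GPFN_MASK;
		unsigned long addr = entry & ~EX_GPFN_MASK;

		printf("tag=%#lx addr=%#lx pfn=%#lx -> %s\n",
		       tag, addr, addr >> EX_PAGE_SHIFT,
		       tag == EX_GPFN_MMIO ? "map uncacheable (UC)" : "map write-back (WB)");
	}

	int main(void)
	{
		unsigned long pfn = 0x80000UL;	/* example host pfn */

		/* RAM entry, as built in the !kvm_is_mmio_pfn() branch */
		decode_pmt_entry(EX_GPFN_MEM | (pfn << EX_PAGE_SHIFT));
		/* MMIO entry, as built in the else branch */
		decode_pmt_entry(EX_GPFN_MMIO | (pfn << EX_PAGE_SHIFT));
		return 0;
	}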