if (!apic)
                return;
-       apic_set_tpr(apic, ((cr8 & 0x0f) << 4));
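+       /*
+        * CR8 carries only TPR bits 7:4; keep bit 2 of TASKPRI, which a
+        * CR8 write would otherwise clear.
+        */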
+       apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
+                    | (apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
                hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
+
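+/*
+ * Pull the TPR value the guest last wrote into the vapic page back into
+ * the emulated local APIC.  Called on every exit from guest mode.
+ */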
+void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+{
+       u32 data;
+       void *vapic;
+
+       if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+               return;
+
+       vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+       data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+       kunmap_atomic(vapic, KM_USER0);
+
+       apic_set_tpr(vcpu->arch.apic, data & 0xff);
+}
+
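+/*
+ * Publish interrupt state to the vapic page before entering the guest:
+ * TPR in bits 7:0, the highest in-service vector (low nibble masked) in
+ * bits 15:8, and the highest pending IRR vector in bits 31:24.
+ */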
+void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+{
+       u32 data, tpr;
+       int max_irr, max_isr;
+       struct kvm_lapic *apic;
+       void *vapic;
+
+       if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+               return;
+
+       apic = vcpu->arch.apic;
+       tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
+       max_irr = apic_find_highest_irr(apic);
+       if (max_irr < 0)
+               max_irr = 0;
+       max_isr = apic_find_highest_isr(apic);
+       if (max_isr < 0)
+               max_isr = 0;
+       data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+       vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+       *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+       kunmap_atomic(vapic, KM_USER0);
+}
+
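+/*
+ * Record the guest-physical address of the vapic block; an address of
+ * zero leaves the vapic path disabled.
+ */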
+void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+{
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       vcpu->arch.apic->vapic_addr = vapic_addr;
+}
 
        struct kvm_vcpu *vcpu;
        struct page *regs_page;
        void *regs;
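+       /* guest-physical address and pinned page of the vapic block */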
+       gpa_t vapic_addr;
+       struct page *vapic_page;
 };
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 
+void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
 #endif
 
                r = 0;
                break;
        };
+       case KVM_SET_VAPIC_ADDR: {
+               struct kvm_vapic_addr va;
+
+               r = -EINVAL;
+               if (!irqchip_in_kernel(vcpu->kvm))
+                       goto out;
+               r = -EFAULT;
+               if (copy_from_user(&va, argp, sizeof va))
+                       goto out;
+               r = 0;
+               kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+               break;
+       }
        default:
                r = -EINVAL;
        }
        }
 
        switch (nr) {
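+       /*
+        * No work is needed here: the exit from guest mode is the point,
+        * since pending interrupts are reevaluated on the next entry.
+        */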
+       case KVM_HC_VAPIC_POLL_IRQ:
+               ret = 0;
+               break;
        default:
                ret = -KVM_ENOSYS;
                break;
                                         vcpu->arch.irq_summary == 0);
 }
 
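+/*
+ * Pin the page backing the guest's vapic block so it can be kmap'ed
+ * while the vcpu runs.
+ */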
+static void vapic_enter(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+       struct page *page;
+
+       if (!apic || !apic->vapic_addr)
+               return;
+
+       page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+       apic->vapic_page = page;
+}
+
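+/* Unpin the vapic page and mark its gfn dirty in the dirty log. */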
+static void vapic_exit(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       if (!apic || !apic->vapic_addr)
+               return;
+
+       kvm_release_page_dirty(apic->vapic_page);
+       mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
        }
 
+       vapic_enter(vcpu);
+
 preempted:
        if (vcpu->guest_debug.enabled)
                kvm_x86_ops->guest_debug_pre(vcpu);
        if (unlikely(r))
                goto out;
 
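+       /* A trapped guest TPR access is reported back to userspace. */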
+       if (vcpu->requests &&
+           test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests)) {
+               kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
+               r = 0;
+               goto out;
+       }
+
        kvm_inject_pending_timer_irqs(vcpu);
 
        preempt_disable();
        else
                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
 
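+       /* Mirror TPR/ISR/IRR state into the vapic page before entry. */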
+       kvm_lapic_sync_to_vapic(vcpu);
+
        vcpu->guest_mode = 1;
        kvm_guest_enter();
 
        if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
                vcpu->arch.exception.pending = false;
 
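+       /* Pick up any TPR change the guest made via the vapic page. */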
+       kvm_lapic_sync_from_vapic(vcpu);
+
        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
        if (r > 0) {
 
        post_kvm_run_save(vcpu, kvm_run);
 
+       vapic_exit(vcpu);
+
        return r;
 }
 
 
        __u32 reserved[8];
 };
 
+/* for KVM_SET_VAPIC_ADDR */
+struct kvm_vapic_addr {
+       __u64 vapic_addr;
+};
+
 #define KVMIO 0xAE
 
 /*
 #define KVM_GET_CPUID2            _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
 /* Available with KVM_CAP_VAPIC */
 #define KVM_TPR_ACCESS_REPORTING  _IOWR(KVMIO,  0x92, struct kvm_tpr_access_ctl)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_SET_VAPIC_ADDR        _IOW(KVMIO,  0x93, struct kvm_vapic_addr)
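+/*
+ * Illustrative call sequence (vcpu_fd and gpa are placeholders):
+ *
+ *     struct kvm_vapic_addr va = { .vapic_addr = gpa };
+ *     ioctl(vcpu_fd, KVM_SET_VAPIC_ADDR, &va);
+ */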
 
 #endif
 
 /* Return values for hypercalls */
 #define KVM_ENOSYS             1000
 
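+/*
+ * Issued by guest-side vapic code to make the host recheck for
+ * interrupts made deliverable by a TPR change.
+ */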
+#define KVM_HC_VAPIC_POLL_IRQ            1
+
 #ifdef __KERNEL__
 /*
  * hypercalls use architecture specific