* space.
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
+ *
+ * Must be called holding kvm->lock.
  */
-int kvm_set_memory_region(struct kvm *kvm,
-                         struct kvm_userspace_memory_region *mem,
-                         int user_alloc)
+int __kvm_set_memory_region(struct kvm *kvm,
+                           struct kvm_userspace_memory_region *mem,
+                           int user_alloc)
 {
        int r;
        gfn_t base_gfn;
        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-       mutex_lock(&kvm->lock);
-
        new = old = *memslot;
 
        new.base_gfn = base_gfn;
        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
-               goto out_unlock;
+               goto out_free;
 
        /* Check for overlaps */
        r = -EEXIST;
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
-                       goto out_unlock;
+                       goto out_free;
        }
 
        /* Free page dirty bitmap if unneeded */
                new.rmap = vmalloc(npages * sizeof(struct page *));
 
                if (!new.rmap)
-                       goto out_unlock;
+                       goto out_free;
 
                memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
                        up_write(&current->mm->mmap_sem);
 
                        if (IS_ERR((void *)new.userspace_addr))
-                               goto out_unlock;
+                               goto out_free;
                }
        } else {
                if (!old.user_alloc && old.rmap) {
 
                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
-                       goto out_unlock;
+                       goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);
 
-       mutex_unlock(&kvm->lock);
-
        kvm_free_physmem_slot(&old, &new);
        return 0;
 
-out_unlock:
-       mutex_unlock(&kvm->lock);
+out_free:
        kvm_free_physmem_slot(&new, &old);
 out:
        return r;
 
 }
+EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
+
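+/*
+ * Locked wrapper for __kvm_set_memory_region(), for callers that do not
+ * already hold kvm->lock.
+ */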
+int kvm_set_memory_region(struct kvm *kvm,
+                         struct kvm_userspace_memory_region *mem,
+                         int user_alloc)
+{
+       int r;
+
+       mutex_lock(&kvm->lock);
+       r = __kvm_set_memory_region(kvm, mem, user_alloc);
+       mutex_unlock(&kvm->lock);
+       return r;
+}
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                memcpy(val, vcpu->mmio_data, bytes);
                vcpu->mmio_read_completed = 0;
                return X86EMUL_CONTINUE;
-       } else if (emulator_read_std(addr, val, bytes, vcpu)
-                  == X86EMUL_CONTINUE)
-               return X86EMUL_CONTINUE;
+       }
 
        gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+
+       /* For APIC access vmexit: route the access through MMIO emulation */
+       if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+               goto mmio;
+
+       if (emulator_read_std(addr, val, bytes, vcpu)
+                       == X86EMUL_CONTINUE)
+               return X86EMUL_CONTINUE;
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
 
+mmio:
        /*
         * Is this MMIO handled locally?
         */
                return X86EMUL_PROPAGATE_FAULT;
        }
 
+       /* For APIC access vmexit: route the access through MMIO emulation */
+       if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+               goto mmio;
+
        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;
 
+mmio:
        /*
         * Is this MMIO handled locally?
         */
 
        u32 revision_id;
        u32 pin_based_exec_ctrl;
        u32 cpu_based_exec_ctrl;
+       u32 cpu_based_2nd_exec_ctrl;
        u32 vmexit_ctrl;
        u32 vmentry_ctrl;
 } vmcs_config;
        return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
 }
 
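+/* Does this CPU support the secondary processor-based VM-execution controls? */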
+static inline int cpu_has_secondary_exec_ctrls(void)
+{
+       return (vmcs_config.cpu_based_exec_ctrl &
+               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
+}
+
+static inline int vm_need_secondary_exec_ctrls(struct kvm *kvm)
+{
+       return ((cpu_has_secondary_exec_ctrls()) && (irqchip_in_kernel(kvm)));
+}
+
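+/* Does this CPU support the "virtualize APIC accesses" secondary control? */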
+static inline int cpu_has_vmx_virtualize_apic_accesses(void)
+{
+       return (vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+}
+
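+/* APIC accesses are only worth virtualizing with an in-kernel lapic. */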
+static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+{
+       return ((cpu_has_vmx_virtualize_apic_accesses()) &&
+               (irqchip_in_kernel(kvm)));
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
        u32 min, opt;
        u32 _pin_based_exec_control = 0;
        u32 _cpu_based_exec_control = 0;
+       u32 _cpu_based_2nd_exec_control = 0;
        u32 _vmexit_control = 0;
        u32 _vmentry_control = 0;
 
              CPU_BASED_USE_IO_BITMAPS |
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING;
-#ifdef CONFIG_X86_64
-       opt = CPU_BASED_TPR_SHADOW;
-#else
-       opt = 0;
-#endif
+       opt = CPU_BASED_TPR_SHADOW |
+             CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
                                &_cpu_based_exec_control) < 0)
                return -EIO;
                _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
                                           ~CPU_BASED_CR8_STORE_EXITING;
 #endif
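+       /* Negotiate secondary controls only if the primary field offers them */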
+       if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
+               min = 0;
+               opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
+                                       &_cpu_based_2nd_exec_control) < 0)
+                       return -EIO;
+       }
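+       /*
+        * A 32-bit host has no CR8, so the guest reaches its TPR only
+        * through the memory-mapped APIC page; without "virtualize APIC
+        * accesses" the TPR shadow is useless there.
+        */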
+#ifndef CONFIG_X86_64
+       if (!(_cpu_based_2nd_exec_control &
+                               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+               _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+#endif
 
        min = 0;
 #ifdef CONFIG_X86_64
 
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+       vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
        vmcs_conf->vmexit_ctrl         = _vmexit_control;
        vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
        vmcs_write32(sf->ar_bytes, 0x93);
 }
 
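+/*
+ * Back the APIC default base (0xfee00000) with a one-page private memslot
+ * so that guest accesses to it take the APIC-access vmexit path.  kvm->lock
+ * is taken here, hence the unlocked __kvm_set_memory_region() variant.
+ */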
+static int alloc_apic_access_page(struct kvm *kvm)
+{
+       struct kvm_userspace_memory_region kvm_userspace_mem;
+       int r = 0;
+
+       mutex_lock(&kvm->lock);
+       if (kvm->apic_access_page)
+               goto out;
+       kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
+       kvm_userspace_mem.flags = 0;
+       kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
+       kvm_userspace_mem.memory_size = PAGE_SIZE;
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       if (r)
+               goto out;
+       kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
                                CPU_BASED_CR8_LOAD_EXITING;
 #endif
        }
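+       /* Drop the secondary controls if this VM cannot use them */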
+       if (!vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+               exec_control &= ~CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 
+       if (vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                            vmcs_config.cpu_based_2nd_exec_ctrl);
+
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
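+       /* Reserve the page that will absorb the guest's APIC accesses */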
+       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+               if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
+                       return -ENOMEM;
+
        return 0;
 }
 
 
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
 
-#ifdef CONFIG_X86_64
-       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
-       if (vm_need_tpr_shadow(vmx->vcpu.kvm))
-               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-                            page_to_phys(vmx->vcpu.apic->regs_page));
-       vmcs_write32(TPR_THRESHOLD, 0);
-#endif
+       if (cpu_has_vmx_tpr_shadow()) {
+               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+               if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+                                    page_to_phys(vmx->vcpu.apic->regs_page));
+               vmcs_write32(TPR_THRESHOLD, 0);
+       }
+
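+       /* Point the hardware at the page set up by alloc_apic_access_page() */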
+       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+               vmcs_write64(APIC_ACCESS_ADDR,
+                            page_to_phys(vmx->vcpu.kvm->apic_access_page));
 
        vmx->vcpu.cr0 = 0x60000010;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
        return 1;
 }
 
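+/*
+ * The guest touched the APIC page: emulate the faulting instruction so the
+ * access reaches the in-kernel lapic as MMIO.  Bits 11:0 of the exit
+ * qualification give the offset within the page.
+ */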
+static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       u64 exit_qualification;
+       enum emulation_result er;
+       unsigned long offset;
+
+       exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+       offset = exit_qualification & 0xffful;
+
+       er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+
+       if (er != EMULATE_DONE) {
+               printk(KERN_ERR
+                      "Failed to handle apic access vmexit! Offset is 0x%lx\n",
+                      offset);
+               return -ENOTSUPP;
+       }
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
        [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
-       [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold
+       [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
+       [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 };
 
 static const int kvm_vmx_max_exit_handlers =