{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       ++vcpu->stat.nmi_injections;
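+       /*
+        * Real mode has no hardware event injection; emulate the NMI
+        * via the real-mode interrupt injection machinery instead.
+        */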
        if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
                vmx->rmode.irq.vector = NMI_VECTOR;
 {
        vmx_update_window_states(vcpu);
 
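+       /*
+        * With virtual NMI support, a pending NMI may only be injected
+        * while the NMI window is open; otherwise request an NMI-window
+        * exit and retry the injection on a later entry.
+        */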
+       if (cpu_has_virtual_nmis()) {
+               if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
+                       if (vcpu->arch.nmi_window_open) {
+                               vcpu->arch.nmi_pending = false;
+                               vcpu->arch.nmi_injected = true;
+                       } else {
+                               enable_nmi_window(vcpu);
+                               return;
+                       }
+               }
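+               /*
+                * Deliver the injected NMI and keep the appropriate
+                * window exiting enabled so that further pending events
+                * or user-space window requests are noticed promptly.
+                */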
+               if (vcpu->arch.nmi_injected) {
+                       vmx_inject_nmi(vcpu);
+                       if (vcpu->arch.nmi_pending
+                           || kvm_run->request_nmi_window)
+                               enable_nmi_window(vcpu);
+                       else if (vcpu->arch.irq_summary
+                                || kvm_run->request_interrupt_window)
+                               enable_irq_window(vcpu);
+                       return;
+               }
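+               /*
+                * Nothing to inject: if the NMI window is closed, or
+                * user space asked to be notified once it opens, enable
+                * NMI-window exiting.
+                */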
+               if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
+                       enable_nmi_window(vcpu);
+       }
+
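+       /* No NMI to deliver: fall through to ordinary interrupt injection. */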
        if (vcpu->arch.interrupt_window_open) {
                if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
                        kvm_do_inject_irq(vcpu);
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
        ++vcpu->stat.nmi_window_exits;
 
+       /*
+        * If user space is waiting to inject an NMI, exit to it as soon
+        * as possible.
+        */
+       if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) {
+               kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
+               return 0;
+       }
+
        return 1;
 }