}
 EXPORT_SYMBOL_GPL(save_msrs);
 
+/*
+ * Complete a port I/O operation that was forwarded to userspace via
+ * KVM_EXIT_IO: fold the result back into the guest register state and
+ * advance the guest past the I/O instruction.
+ *
+ * Called from kvm_vcpu_ioctl_run() when userspace re-enters the vcpu
+ * with run->io_completed set while pio_pending is active.
+ */
+static void complete_pio(struct kvm_vcpu *vcpu)
+{
+       struct kvm_io *io = &vcpu->run->io;
+       long delta;
+
+       /* Pull the current guest registers out of the hardware state. */
+       kvm_arch_ops->cache_regs(vcpu);
+
+       if (!io->string) {
+               /*
+                * Non-string IN: userspace supplied the value; copy its
+                * low io->size bytes into RAX.  OUT needs no write-back.
+                */
+               if (io->direction == KVM_EXIT_IO_IN)
+                       memcpy(&vcpu->regs[VCPU_REGS_RAX], &io->value,
+                              io->size);
+       } else {
+               /*
+                * String instruction (INS/OUTS): advance the index
+                * register(s) past the elements userspace transferred.
+                */
+               delta = 1;
+               if (io->rep) {
+                       /*
+                        * REP prefix: all io->count elements were handled
+                        * in one go; consume them from the RCX counter.
+                        */
+                       delta *= io->count;
+                       /*
+                        * The size of the register should really depend on
+                        * current address size.
+                        */
+                       vcpu->regs[VCPU_REGS_RCX] -= delta;
+               }
+               /* string_down mirrors EFLAGS.DF: move down in memory. */
+               if (io->string_down)
+                       delta = -delta;
+               /* Scale element count by element size to get bytes. */
+               delta *= io->size;
+               /* INS writes through ES:RDI; OUTS reads from DS:RSI. */
+               if (io->direction == KVM_EXIT_IO_IN)
+                       vcpu->regs[VCPU_REGS_RDI] += delta;
+               else
+                       vcpu->regs[VCPU_REGS_RSI] += delta;
+       }
+
+       /* The round trip through userspace is finished. */
+       vcpu->pio_pending = 0;
+       vcpu->run->io_completed = 0;
+
+       /* Push the updated registers back into the guest state. */
+       kvm_arch_ops->decache_regs(vcpu);
+
+       /* Step RIP past the IN/OUT/INS/OUTS instruction. */
+       kvm_arch_ops->skip_emulated_instruction(vcpu);
+}
+
 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
                kvm_run->emulated = 0;
        }
 
-       if (kvm_run->mmio_completed) {
-               memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
-               vcpu->mmio_read_completed = 1;
+       if (kvm_run->io_completed) {
+               if (vcpu->pio_pending)
+                       complete_pio(vcpu);
+               else {
+                       memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+                       vcpu->mmio_read_completed = 1;
+               }
        }
 
        vcpu->mmio_needed = 0;
 
        kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT);
        kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
        kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;
+       kvm_run->io.count = 1;
 
        if (kvm_run->io.string) {
                unsigned addr_mask;
                }
        } else
                kvm_run->io.value = vcpu->svm->vmcb->save.rax;
+       vcpu->pio_pending = 1;
        return 0;
 }
 
 
                = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
        kvm_run->io.rep = (exit_qualification & 32) != 0;
        kvm_run->io.port = exit_qualification >> 16;
+       kvm_run->io.count = 1;
        if (kvm_run->io.string) {
                if (!get_io_count(vcpu, &kvm_run->io.count))
                        return 1;
                kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
        } else
                kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
+       vcpu->pio_pending = 1;
        return 0;
 }
 
 
 #include <asm/types.h>
 #include <linux/ioctl.h>
 
-#define KVM_API_VERSION 5
+#define KVM_API_VERSION 6
 
 /*
  * Architectural interrupt line count, and the size of the bitmap needed
 struct kvm_run {
        /* in */
        __u32 emulated;  /* skip current instruction */
-       __u32 mmio_completed; /* mmio request completed */
+       __u32 io_completed; /* mmio/pio request completed */
        __u8 request_interrupt_window;
        __u8 padding1[7];
 
                        __u32 error_code;
                } ex;
                /* KVM_EXIT_IO */
-               struct {
+               struct kvm_io {
 #define KVM_EXIT_IO_IN  0
 #define KVM_EXIT_IO_OUT 1
                        __u8 direction;