struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        int round_robin_prev_vcpu;
+       unsigned int tss_addr;
 };
 
 static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run);
+
+       int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
        return fd;
 }
 
+/*
+ * Validate and forward a userspace-supplied TSS base address to the
+ * arch backend (kvm_x86_ops->set_tss_addr).
+ *
+ * The real-mode TSS workaround occupies three consecutive pages, so
+ * reject any address whose three-page span would wrap the 32-bit
+ * physical address space.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range address, or the
+ * error from the backend's set_tss_addr hook.
+ */
+static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
+{
+       int ret;
+
+       if (addr > (unsigned int)(-3 * PAGE_SIZE))
+               return -EINVAL;
+       ret = kvm_x86_ops->set_tss_addr(kvm, addr);
+       return ret;
+}
+
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
        int r = -EINVAL;
 
        switch (ioctl) {
+       case KVM_SET_TSS_ADDR:
+               r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
+               if (r < 0)
+                       goto out;
+               break;
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                case KVM_CAP_HLT:
                case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
                case KVM_CAP_USER_MEMORY:
+               case KVM_CAP_SET_TSS_ADDR:
                        r = 1;
                        break;
                default:
 
                control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }
 
+/*
+ * set_tss_addr hook for SVM: intentionally a no-op that reports
+ * success.  The TSS address only matters for the VMX real-mode
+ * workaround (see vmx_set_tss_addr); presumably SVM needs no such
+ * workaround, so there is nothing to record here — TODO confirm.
+ */
+static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+       return 0;
+}
+
 static void save_db_regs(unsigned long *db_regs)
 {
        asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
        .set_irq = svm_set_irq,
        .inject_pending_irq = svm_intr_assist,
        .inject_pending_vectors = do_interrupt_requests,
+
+       .set_tss_addr = svm_set_tss_addr,
 };
 
 static int __init svm_init(void)
 
 
static gva_t rmode_tss_base(struct kvm *kvm)
{
-       gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
-       return base_gfn << PAGE_SHIFT;
+       /*
+        * If userspace has not set an explicit TSS address via
+        * KVM_SET_TSS_ADDR (kvm->tss_addr still 0), fall back to the
+        * historical default: the last three pages of memory slot 0.
+        */
+       if (!kvm->tss_addr) {
+               gfn_t base_gfn = kvm->memslots[0].base_gfn +
+                                kvm->memslots[0].npages - 3;
+               return base_gfn << PAGE_SHIFT;
+       }
+       /* Otherwise honor the address chosen by userspace. */
+       return kvm->tss_addr;
}
 
 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
+/*
+ * set_tss_addr hook for VMX: register a private three-page memory
+ * region at @addr to back the real-mode TSS, then record the address
+ * in kvm->tss_addr so rmode_tss_base() returns it instead of the
+ * legacy "top of slot 0" default.
+ *
+ * Returns 0 on success or the error from kvm_set_memory_region().
+ */
+static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+       int ret;
+       struct kvm_userspace_memory_region tss_mem = {
+               .slot = 8, /* NOTE(review): magic private slot number — a named constant would be clearer */
+               .guest_phys_addr = addr,
+               .memory_size = PAGE_SIZE * 3,
+               .flags = 0,
+       };
+
+       ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+       if (ret)
+               return ret;
+       /* Only record the address once the backing region is in place. */
+       kvm->tss_addr = addr;
+       return 0;
+}
+
 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
 {
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;
        .set_irq = vmx_inject_irq,
        .inject_pending_irq = vmx_intr_assist,
        .inject_pending_vectors = do_interrupt_requests,
+
+       .set_tss_addr = vmx_set_tss_addr,
 };
 
 static int __init vmx_init(void)
 
 #define KVM_CAP_HLT      1
 #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
 #define KVM_CAP_USER_MEMORY 3
+#define KVM_CAP_SET_TSS_ADDR 4
 
 /*
  * ioctls for VM fds
 #define KVM_GET_NR_MMU_PAGES      _IO(KVMIO, 0x45)
 #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
                                        struct kvm_userspace_memory_region)
+#define KVM_SET_TSS_ADDR          _IO(KVMIO, 0x47)
 /*
  * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
  * a vcpu fd.