static void ack_flush(void *_completed)
{
- atomic_t *completed = _completed;
-
- atomic_inc(completed);
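+ /* nothing to do; the IPI itself forces a vmexit, and smp_call_function_mask() does the waiting for us */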
}
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- int i, cpu, needed;
+ int i, cpu;
cpumask_t cpus;
struct kvm_vcpu *vcpu;
- atomic_t completed;
- atomic_set(&completed, 0);
cpus_clear(cpus);
- needed = 0;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
if (!vcpu)
continue;
cpu = vcpu->cpu;
if (cpu != -1 && cpu != raw_smp_processor_id())
- if (!cpu_isset(cpu, cpus)) {
- cpu_set(cpu, cpus);
- ++needed;
- }
- }
-
- /*
- * We really want smp_call_function_mask() here. But that's not
- * available, so ipi all cpus in parallel and wait for them
- * to complete.
- */
- for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
- smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
- while (atomic_read(&completed) != needed) {
- cpu_relax();
- barrier();
+ cpu_set(cpu, cpus);
}
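+ /* ipi the remote cpus in parallel and wait (last arg) for the handlers to finish */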
+ smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
int emulate_clts(struct kvm_vcpu *vcpu)
{
- vcpu->cr0 &= ~X86_CR0_TS;
- kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
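+ /* set_cr0() refreshes the cached vcpu->cr0 itself, so don't update it here */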
+ kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
return X86EMUL_CONTINUE;
}
return X86EMUL_CONTINUE;
}
-static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
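+ /* renamed and exported so the arch modules (kvm-intel, kvm-amd) can report failures too */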
+void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
static int reported;
u8 opcodes[4];
- unsigned long rip = ctxt->vcpu->rip;
+ unsigned long rip = vcpu->rip;
unsigned long rip_linear;
- rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
+ rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
if (reported)
return;
- emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt->vcpu);
+ emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
- printk(KERN_ERR "emulation failed but !mmio_needed?"
- " rip %lx %02x %02x %02x %02x\n",
- rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+ printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+ context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
reported = 1;
}
+EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
struct x86_emulate_ops emulate_ops = {
.read_std = emulator_read_std,
if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
return EMULATE_DONE;
if (!vcpu->mmio_needed) {
- report_emulation_failure(&emulate_ctxt);
+ kvm_report_emulation_failure(vcpu, "mmio");
return EMULATE_FAIL;
}
return EMULATE_DO_MMIO;
io->count -= io->cur_count;
io->cur_count = 0;
- if (!io->count)
- kvm_x86_ops->skip_emulated_instruction(vcpu);
return 0;
}
memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->decache_regs(vcpu);
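+ /* skip the instruction as soon as it is emulated: a non-string pio never
+  * needs to be restarted, so rip stays consistent even if the I/O itself
+  * completes in userspace later
+  */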
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
pio_dev = vcpu_find_pio_dev(vcpu, port);
if (pio_dev) {
kernel_pio(pio_dev, vcpu, vcpu->pio_data);
vcpu->run->io.count = now;
vcpu->pio.cur_count = now;
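+ /* only skip once the whole transfer fits in this round; otherwise the
+  * instruction must be re-executed for the remaining iterations
+  */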
+ if (vcpu->pio.cur_count == vcpu->pio.count)
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
for (i = 0; i < nr_pages; ++i) {
mutex_lock(&vcpu->kvm->lock);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
vcpu->guest_mode = 1;
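+ /* account cpu time from here until kvm_guest_exit() as guest time */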
+ kvm_guest_enter();
if (vcpu->requests)
if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
++vcpu->stat.exits;
+ /*
+ * We must have an instruction between local_irq_enable() and
+ * kvm_guest_exit(), so the timer interrupt isn't delayed by
+ * the interrupt shadow. The stat.exits increment will do nicely.
+ * But we need to prevent reordering, hence this barrier():
+ */
+ barrier();
+
+ kvm_guest_exit();
+
preempt_enable();
/*
}
static struct sysdev_class kvm_sysdev_class = {
- set_kset_name("kvm"),
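+ /* sysdev classes now take a plain name instead of an embedded kset */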
+ .name = "kvm",
.suspend = kvm_suspend,
.resume = kvm_resume,
};