The common halt logic was changed on the x86 side, but ia64 was not
updated to match. This patch updates the ia64 halt emulation accordingly.
Fixes a regression that caused guests with more than 2 vcpus to hang.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
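
For orientation, the flow the patch establishes is: the PAL halt emulation arms the per-vcpu timer flags, the vcpu blocks, and either the halt timer or an injected interrupt wakes it, after which the generic KVM_REQ_UNHALT request makes it runnable again. Below is a minimal, self-contained C sketch of that state machine; the struct and helper names are illustrative stand-ins for the kernel code, not part of the patch.

#include <stdio.h>

/* Illustrative model only; the real code uses hrtimers and wait queues. */
enum mp_state { MP_RUNNABLE, MP_HALTED };

struct vcpu_model {
	enum mp_state mp_state;
	int timer_pending;	/* inject a timer tick on the next guest entry */
	int timer_fired;	/* set when the halt timer actually expires */
	int unhalt_request;	/* stands in for KVM_REQ_UNHALT */
};

/* PAL_HALT_LIGHT path: reset the flags before blocking. */
static void prepare_for_halt(struct vcpu_model *v)
{
	v->timer_pending = 1;
	v->timer_fired = 0;
}

/* Halt timer expiry: record the event and request a wake-up. */
static void hlt_timer_fires(struct vcpu_model *v)
{
	v->timer_fired = 1;
	v->unhalt_request = 1;
}

/* After the block returns: only an unhalt request makes the vcpu
 * runnable; anything else (e.g. a signal) leaves it halted. */
static void after_block(struct vcpu_model *v)
{
	if (v->unhalt_request && v->mp_state == MP_HALTED) {
		v->unhalt_request = 0;
		v->mp_state = MP_RUNNABLE;
	}
}

int main(void)
{
	struct vcpu_model v = { MP_RUNNABLE, 0, 0, 0 };

	prepare_for_halt(&v);
	v.mp_state = MP_HALTED;		/* kvm_emulate_halt() blocks here */
	hlt_timer_fires(&v);
	after_block(&v);
	printf("runnable=%d timer_fired=%d\n",
	       v.mp_state == MP_RUNNABLE, v.timer_fired);
	return 0;
}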
long itc_offset;
unsigned long itc_check;
unsigned long timer_check;
- unsigned long timer_pending;
+ unsigned int timer_pending;
+ unsigned int timer_fired;
unsigned long vrr[8];
unsigned long ibr[8];
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) {
ktime_t kt;
long itc_diff;
unsigned long vcpu_now_itc;
unsigned long expires;
struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+ if (irqchip_in_kernel(vcpu->kvm)) {
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
+
+ expires = div64_u64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+
+ down_read(&vcpu->kvm->slots_lock);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
- if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
kvm_vcpu_block(vcpu);
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
+
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
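
The important change in this hunk is that the vcpu is no longer made runnable directly: after kvm_vcpu_block() returns, the arch code consumes the generic KVM_REQ_UNHALT request, matching the common x86 path, and kvm->slots_lock is held for read across the blocking region. A hedged sketch of the consume step, with a hand-rolled helper standing in for the kernel's test_and_clear_bit():

#include <stdbool.h>
#include <stdio.h>

enum mp_state { MP_RUNNABLE, MP_HALTED };
#define REQ_UNHALT 0	/* bit index, like KVM_REQ_UNHALT */

/* Simplified stand-in for the kernel's atomic test_and_clear_bit(). */
static bool test_and_clear(unsigned long *word, unsigned int bit)
{
	bool was_set = (*word >> bit) & 1;
	*word &= ~(1UL << bit);
	return was_set;
}

int main(void)
{
	unsigned long requests = 1UL << REQ_UNHALT;	/* wake-up requested */
	enum mp_state mp_state = MP_HALTED;

	/* The consume step from the hunk above: an unhalt request moves
	 * HALTED -> RUNNABLE; otherwise the caller returns -EINTR. */
	if (test_and_clear(&requests, REQ_UNHALT) && mp_state == MP_HALTED)
		mp_state = MP_RUNNABLE;

	printf("runnable=%d\n", mp_state == MP_RUNNABLE);
	return 0;
}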
static const int kvm_vti_max_exit_handlers =
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_exit_data;
again:
preempt_disable();
-
- kvm_prepare_guest_switch(vcpu);
local_irq_disable();
if (signal_pending(current)) {
vcpu->guest_mode = 1;
kvm_guest_enter();
+ down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
* But we need to prevent reordering, hence this barrier():
*/
barrier();
+ up_read(&vcpu->kvm->slots_lock);
preempt_enable();
r = kvm_handle_exit(kvm_run, vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu_put(vcpu);
return -EAGAIN;
}
wait_queue_head_t *q;
vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ q = &vcpu->wq;
if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
goto out;
- q = &vcpu->wq;
- if (waitqueue_active(q)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(q))
wake_up_interruptible(q);
- }
out:
+ vcpu->arch.timer_fired = 1;
vcpu->arch.timer_check = 1;
return HRTIMER_NORESTART;
}
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode)
+ if (vcpu->guest_mode && cpu != ipi_pcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+ put_cpu();
}
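
The new guard avoids a pointless self-IPI: only a vcpu executing guest code on a different physical cpu needs the interrupt to force an exit. A small illustrative predicate (the function name is mine, not the kernel's):

#include <stdio.h>

/* Model of the kick condition added above. */
static int need_ipi(int guest_mode, int vcpu_pcpu, int this_cpu)
{
	return guest_mode && this_cpu != vcpu_pcpu;
}

int main(void)
{
	printf("%d\n", need_ipi(1, 2, 0));	/* 1: remote guest-mode vcpu */
	printf("%d\n", need_ipi(1, 0, 0));	/* 0: already on this cpu */
	printf("%d\n", need_ipi(0, 2, 0));	/* 0: not in guest mode */
	return 0;
}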
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
if (!test_and_set_bit(vec, &vpd->irr[0])) {
vcpu->arch.irq_new_pending = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- }
+ kvm_vcpu_kick(vcpu);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_pending = 1;
+ vcpu->arch.timer_fired = 0;
+}
+
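
prepare_for_halt() makes the split between the two flags explicit: timer_pending asks for a timer interrupt to be injected on the next guest entry, while timer_fired is what the new kvm_cpu_has_pending_timer() reports so the generic block loop knows to stop waiting. A rough sketch of how such a hook could feed a wait-loop check, with invented names throughout:

#include <stdbool.h>
#include <stdio.h>

struct vcpu_flags {
	bool timer_fired;	/* what kvm_cpu_has_pending_timer() reports */
	bool interrupt_ready;	/* some other wake source */
};

static bool cpu_has_pending_timer(const struct vcpu_flags *v)
{
	return v->timer_fired;
}

/* Invented helper: the kind of check a generic block loop performs
 * before letting a halted vcpu keep sleeping. */
static bool should_stop_waiting(const struct vcpu_flags *v)
{
	return cpu_has_pending_timer(v) || v->interrupt_ready;
}

int main(void)
{
	struct vcpu_flags v = { false, false };

	printf("wake=%d\n", should_stop_waiting(&v));	/* 0: keep sleeping */
	v.timer_fired = true;				/* halt timer expired */
	printf("wake=%d\n", should_stop_waiting(&v));	/* 1: wake the vcpu */
	return 0;
}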
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
break;
case PAL_HALT_LIGHT:
{
- vcpu->arch.timer_pending = 1;
INIT_PAL_STATUS_SUCCESS(result);
+ prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
if (!(VCPU(v, itv) & (1 << 16))) {
vcpu_pend_interrupt(v, VCPU(v, itv)
& 0xff);
} else {
v->arch.timer_pending = 1;
}