     return 1;
 }

-static void pt_lock(struct periodic_time *pt)
+static void pt_vcpu_lock(struct vcpu *v)
 {
-    struct vcpu *v;
+    read_lock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+}

-    for ( ; ; )
-    {
-        v = pt->vcpu;
-        spin_lock(&v->arch.hvm_vcpu.tm_lock);
-        if ( likely(pt->vcpu == v) )
-            break;
-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
-    }
+static void pt_vcpu_unlock(struct vcpu *v)
+{
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    read_unlock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
+}
+
+static void pt_lock(struct periodic_time *pt)
+{
+    /*
+     * We cannot use pt_vcpu_lock here, because we need to acquire the
+     * per-domain lock first and then (re-)fetch the value of pt->vcpu, or
+     * else we might be using a stale value of pt->vcpu.
+     */
+    read_lock(&pt->vcpu->domain->arch.hvm_domain.pl_time->pt_migrate);
+    spin_lock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
 }

 static void pt_unlock(struct periodic_time *pt)
 {
-    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(pt->vcpu);
 }

 static void pt_process_missed_ticks(struct periodic_time *pt)
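The helper split above encodes a strict lock order: the domain-wide pt_migrate rwlock is always taken before a vCPU's tm_lock. That ordering is what lets pt_lock() drop the old fetch/lock/recheck loop: while the read lock is held, the writers of pt->vcpu (pt_adjust_vcpu() and create_periodic_time(), further down) are excluded, so a value of pt->vcpu fetched under the read lock cannot go stale. The following standalone sketch restates the discipline with POSIX primitives rather than Xen's; the types and helper names are illustrative only, not Xen APIs.

#include <pthread.h>

/* Illustrative stand-ins for the Xen structures involved. */
struct domain {
    pthread_rwlock_t pt_migrate;          /* pl_time->pt_migrate */
};

struct vcpu {
    struct domain *domain;
    pthread_mutex_t tm_lock;              /* arch.hvm_vcpu.tm_lock */
};

struct periodic_time {
    struct vcpu *vcpu;                    /* rebound only under write lock */
};

/*
 * Reader side, mirroring pt_lock(): take the per-domain read lock first,
 * then lock the tm_lock of the vCPU that pt->vcpu points at *now*.
 * Dereferencing pt->vcpu for the rwlock itself before that lock is held
 * is tolerable only because a periodic_time never moves between domains,
 * so even a stale vcpu pointer names the same pt_migrate lock.
 */
static void pt_lock_sketch(struct periodic_time *pt)
{
    pthread_rwlock_rdlock(&pt->vcpu->domain->pt_migrate);
    pthread_mutex_lock(&pt->vcpu->tm_lock);   /* pt->vcpu is stable here */
}

static void pt_unlock_sketch(struct periodic_time *pt)
{
    pthread_mutex_unlock(&pt->vcpu->tm_lock);
    pthread_rwlock_unlock(&pt->vcpu->domain->pt_migrate);
}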
@@ ... @@ void pt_save_timer(struct vcpu *v)
     if ( v->pause_flags & VPF_blocked )
         return;

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_lock(v);

     list_for_each_entry ( pt, head, list )
         if ( !pt->do_not_freeze )
@@ ... @@ void pt_save_timer(struct vcpu *v)

     pt_freeze_time(v);

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(v);
 }
@@ ... @@ void pt_restore_timer(struct vcpu *v)
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_lock(v);

     list_for_each_entry ( pt, head, list )
     {
@@ ... @@ void pt_restore_timer(struct vcpu *v)

     pt_thaw_time(v);

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(v);
 }
@@ ... @@ static void pt_timer_fn(void *data)
     uint64_t max_lag;
     int irq, pt_vector = -1;

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_lock(v);

     earliest_pt = NULL;
     max_lag = -1ULL;
@@ ... @@ int pt_update_irq(struct vcpu *v)
     if ( earliest_pt == NULL )
     {
-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+        pt_vcpu_unlock(v);
         return -1;
     }

     earliest_pt->irq_issued = 1;
     irq = earliest_pt->irq;

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(v);

     switch ( earliest_pt->source )
     {
@@ ... @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
     if ( intack.source == hvm_intsrc_vector )
         return;

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_lock(v);

     pt = is_pt_irq(v, intack);
     if ( pt == NULL )
     {
-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+        pt_vcpu_unlock(v);
         return;
     }
@@ ... @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
     cb = pt->cb;
     cb_priv = pt->priv;

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(v);

     if ( cb != NULL )
         cb(v, cb_priv);
@@ ... @@ void pt_migrate(struct vcpu *v)
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_lock(v);

     list_for_each_entry ( pt, head, list )
         migrate_timer(&pt->timer, v->processor);

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    pt_vcpu_unlock(v);
 }
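Every per-vCPU consumer above (pt_save_timer(), pt_restore_timer(), pt_update_irq(), pt_intr_post() and pt_migrate()) now walks the tm_list only between pt_vcpu_lock() and pt_vcpu_unlock(), i.e. with pt_migrate held for reading. A hypothetical debug helper, not part of the patch, would state that invariant explicitly:

/* Hypothetical helper (not in the patch): verify the nesting that
 * pt_vcpu_lock() establishes before a tm_list walk. */
static void assert_pt_locked(struct vcpu *v)
{
    ASSERT(rw_is_locked(&v->domain->arch.hvm_domain.pl_time->pt_migrate));
    ASSERT(spin_is_locked(&v->arch.hvm_vcpu.tm_lock));
}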
@@ ... @@ void create_periodic_time(
     destroy_periodic_time(pt);

-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    write_lock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);

     pt->pending_intr_nr = 0;
     pt->do_not_freeze = 0;
@@ ... @@ void create_periodic_time(
     init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
     set_timer(&pt->timer, pt->scheduled);

-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    write_unlock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
 }
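create_periodic_time() is one of the two writers of pt->vcpu (it binds pt to the target vCPU in the elided middle of the function), so instead of pt_vcpu_lock() it takes pt_migrate exclusively, keeping every reader out while the timer is initialised and queued. In terms of the earlier pthread sketch, the writer side looks like this (again illustrative names only):

/* Writer side, mirroring create_periodic_time(): with the rwlock held
 * exclusively, (re)binding pt->vcpu and arming the timer appear atomic
 * to every pt_lock_sketch()/pt_unlock_sketch() reader. */
static void pt_bind_sketch(struct periodic_time *pt, struct vcpu *v)
{
    pthread_rwlock_wrlock(&v->domain->pt_migrate);
    pt->vcpu = v;
    /* ... reset state, add pt to v's timer list, arm the timer ... */
    pthread_rwlock_unlock(&v->domain->pt_migrate);
}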
@@ ... @@ void destroy_periodic_time(struct periodic_time *pt)
 static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
 {
-    int on_list;
-
     ASSERT(pt->source == PTSRC_isa || pt->source == PTSRC_ioapic);

     if ( pt->vcpu == NULL )
         return;

-    pt_lock(pt);
-    on_list = pt->on_list;
-    if ( pt->on_list )
-        list_del(&pt->list);
-    pt->on_list = 0;
-    pt_unlock(pt);
-
-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+    write_lock(&pt->vcpu->domain->arch.hvm_domain.pl_time->pt_migrate);
     pt->vcpu = v;
-    if ( on_list )
+    if ( pt->on_list )
     {
-        pt->on_list = 1;
+        list_del(&pt->list);
         list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
-
         migrate_timer(&pt->timer, v->processor);
     }
-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    write_unlock(&pt->vcpu->domain->arch.hvm_domain.pl_time->pt_migrate);
 }

 void pt_adjust_global_vcpu_target(struct vcpu *v)
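With the write lock serialising it against all readers, pt_adjust_vcpu() collapses into a single critical section. The removed sequence (take pt off the source list, drop all locks, retake the destination vCPU's lock, reinsert) left a window in which pt->on_list was transiently clear and pt sat on no list at all; now the pt->vcpu update and the list move are one atomic step. The short demo below ties the pthread sketch together: a reader locking through pt races a writer retargeting it between two vCPUs, and the rwlock guarantees the reader always holds the tm_lock of the vCPU pt currently points at. Again a sketch, not Xen code.

#include <stdio.h>

static struct domain dom;
static struct vcpu v0, v1;
static struct periodic_time pt;

static void *reader_fn(void *arg)
{
    (void)arg;
    for ( int i = 0; i < 100000; i++ )
    {
        pt_lock_sketch(&pt);
        /* pt.vcpu cannot change until pt_unlock_sketch() below. */
        pt_unlock_sketch(&pt);
    }
    return NULL;
}

static void *writer_fn(void *arg)
{
    (void)arg;
    for ( int i = 0; i < 100000; i++ )
        pt_bind_sketch(&pt, (i & 1) ? &v1 : &v0);
    return NULL;
}

int main(void)
{
    pthread_t r, w;

    pthread_rwlock_init(&dom.pt_migrate, NULL);
    pthread_mutex_init(&v0.tm_lock, NULL);
    pthread_mutex_init(&v1.tm_lock, NULL);
    v0.domain = v1.domain = &dom;
    pt.vcpu = &v0;

    pthread_create(&r, NULL, reader_fn, NULL);
    pthread_create(&w, NULL, writer_fn, NULL);
    pthread_join(r, NULL);
    pthread_join(w, NULL);
    puts("done");
    return 0;
}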