*/
if ( pd != nd )
cpumask_set_cpu(cpu, nd->dirty_cpumask);
- n->dirty_cpu = cpu;
+ write_atomic(&n->dirty_cpu, cpu);
if ( !is_idle_domain(nd) )
{
@@ ... @@
if ( pd != nd )
cpumask_clear_cpu(cpu, pd->dirty_cpumask);
- p->dirty_cpu = VCPU_CPU_CLEAN;
+ write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
per_cpu(curr_vcpu, cpu) = n;
}
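
These two hunks are in Xen's __context_switch() (xen/arch/x86/domain.c). The incoming vCPU's dirty_cpu is now published with write_atomic() and the outgoing vCPU's is cleared the same way, so a remote CPU polling the field observes exactly one untorn store. The matching reader-side discipline, which the later hunks adopt, is to snapshot the field once and only ever use the snapshot; a minimal sketch, with v standing for any vCPU:

    /* Snapshot once; never re-read the field after validating it. */
    unsigned int cpu = read_atomic(&v->dirty_cpu);

    if ( is_vcpu_dirty_cpu(cpu) )
        flush_tlb_mask(cpumask_of(cpu));

Re-reading v->dirty_cpu instead of reusing the snapshot could yield VCPU_CPU_CLEAN (deliberately >= NR_CPUS) if the vCPU is de-scheduled in between, handing an out-of-range CPU to cpumask_of().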
@@ ... @@
         unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
(l1e_owner == pg_owner) )
{
+ cpumask_t *mask = this_cpu(scratch_cpumask);
+
+ cpumask_clear(mask);
+
for_each_vcpu ( pg_owner, v )
{
- if ( pv_destroy_ldt(v) )
- flush_tlb_mask(cpumask_of(v->dirty_cpu));
+ unsigned int cpu;
+
+ if ( !pv_destroy_ldt(v) )
+ continue;
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
}
+
+ if ( !cpumask_empty(mask) )
+ flush_tlb_mask(mask);
}
put_page(page);
}
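
The hunk above is in put_page_from_l1e() (xen/arch/x86/mm.c). It replaces one flush_tlb_mask() per vCPU whose shadow LDT was torn down with a single batched flush: dirty CPUs are collected in the per-CPU scratch mask and flushed together, and the non-locked __cpumask_set_cpu() suffices because that mask is private to the local CPU. For contrast, here is the removed pattern again, annotated with its hazard (illustration only):

    for_each_vcpu ( pg_owner, v )
    {
        if ( pv_destroy_ldt(v) )
            /*
             * Racy: v->dirty_cpu is read with no synchronisation. If v
             * is context-switched out after pv_destroy_ldt() returns,
             * the field becomes VCPU_CPU_CLEAN (>= NR_CPUS) and
             * cpumask_of() is handed an out-of-range CPU. It also costs
             * one flush per vCPU instead of one in total.
             */
            flush_tlb_mask(cpumask_of(v->dirty_cpu));
    }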
@@ ... @@
        while ( vmask )
{
+ unsigned int cpu;
+
vcpu_id = find_first_set_bit(vmask);
vmask &= ~(1UL << vcpu_id);
vcpu_id += vcpu_bias;
if ( (vcpu_id >= d->max_vcpus) )
return 0;
- if ( ((v = d->vcpu[vcpu_id]) != NULL) && vcpu_cpu_dirty(v) )
- __cpumask_set_cpu(v->dirty_cpu, pmask);
+ if ( (v = d->vcpu[vcpu_id]) == NULL )
+ continue;
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, pmask);
}
}
}
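
This loop is the core of vcpumask_to_pcpumask() (xen/arch/x86/mm.c), which translates a guest-supplied vCPU bitmap (e.g. for the multi-vCPU TLB-flush MMU-ext operations) into a mask of physical CPUs, one bitmap word at a time with vcpu_bias tracking the word offset. The bit walk is easy to check in isolation; a standalone sketch using the GCC/Clang builtin that find_first_set_bit() corresponds to, with made-up input values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long vmask = 0x29;  /* bits 0, 3 and 5 set */
        unsigned int vcpu_bias = 64; /* pretend this is the second 64-bit word */

        while ( vmask )
        {
            /* __builtin_ctzl() matches find_first_set_bit() for nonzero input */
            unsigned int vcpu_id = __builtin_ctzl(vmask);

            vmask &= ~(1UL << vcpu_id);
            printf("vCPU %u\n", vcpu_id + vcpu_bias);
        }

        return 0;
    }

This prints vCPU 64, 67 and 69. In the hypervisor each recovered ID is then bounds-checked against d->max_vcpus and mapped to that vCPU's dirty CPU, as the hunk shows.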
@@ ... @@
             atomic_read(&v->domain->pause_count));
}
-static inline bool vcpu_cpu_dirty(const struct vcpu *v)
+static inline bool is_vcpu_dirty_cpu(unsigned int cpu)
{
BUILD_BUG_ON(NR_CPUS >= VCPU_CPU_CLEAN);
- return v->dirty_cpu != VCPU_CPU_CLEAN;
+ return cpu != VCPU_CPU_CLEAN;
+}
+
+static inline bool vcpu_cpu_dirty(const struct vcpu *v)
+{
+ return is_vcpu_dirty_cpu(v->dirty_cpu);
}
void vcpu_block(void);
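
Finally, the xen/include/xen/sched.h hunk splits the old vcpu_cpu_dirty() into a value-based is_vcpu_dirty_cpu() plus a thin wrapper. Taking a plain integer lets racy callers test a snapshot they already obtained with read_atomic(), while the BUILD_BUG_ON() continues to guarantee that VCPU_CPU_CLEAN can never collide with a valid CPU number. A sketch of the two intended call patterns; sync_vcpu_execstate() here is just a plausible consumer, not taken from the patch:

    /* Racy caller: snapshot once, then test and use the same value. */
    unsigned int cpu = read_atomic(&v->dirty_cpu);

    if ( is_vcpu_dirty_cpu(cpu) )
        __cpumask_set_cpu(cpu, mask);

    /*
     * Caller that knows v can't be running concurrently: the wrapper's
     * single read of the field is then fine.
     */
    if ( vcpu_cpu_dirty(v) )
        sync_vcpu_execstate(v);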