struct vcpu *v = current;
ASSERT(is_idle_vcpu(v));
- cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+ cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
v->dirty_cpu = v->processor;
reset_stack_and_jump(idle_loop);
* which is synchronised on that function.
*/
if ( pd != nd )
- cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
+ cpumask_set_cpu(cpu, nd->dirty_cpumask);
n->dirty_cpu = cpu;
if ( !is_idle_domain(nd) )
}
if ( pd != nd )
- cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
+ cpumask_clear_cpu(cpu, pd->dirty_cpumask);
p->dirty_cpu = VCPU_CPU_CLEAN;
per_cpu(curr_vcpu, cpu) = n;
int ret;
struct vcpu *v;
- BUG_ON(!cpumask_empty(d->domain_dirty_cpumask));
+ BUG_ON(!cpumask_empty(d->dirty_cpumask));
switch ( d->arch.relmem )
{
paging_update_cr3(v);
/* Flush all dirty TLBs. */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
/* Done. */
for_each_vcpu ( d, v )
wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2));
/* flush TLB */
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(v->domain->dirty_cpumask);
return 1;
}
cpumask_t *mask = this_cpu(scratch_cpumask);
BUG_ON(in_irq());
- cpumask_copy(mask, d->domain_dirty_cpumask);
+ cpumask_copy(mask, d->dirty_cpumask);
/* Don't flush if the timestamp is old enough */
tlbflush_filter(mask, page->tlbflush_timestamp);
case MMUEXT_TLB_FLUSH_ALL:
if ( likely(currd == pg_owner) )
- flush_tlb_mask(currd->domain_dirty_cpumask);
+ flush_tlb_mask(currd->dirty_cpumask);
else
rc = -EPERM;
break;
if ( unlikely(currd != pg_owner) )
rc = -EPERM;
else if ( __addr_ok(op.arg1.linear_addr) )
- flush_tlb_one_mask(currd->domain_dirty_cpumask,
- op.arg1.linear_addr);
+ flush_tlb_one_mask(currd->dirty_cpumask, op.arg1.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
unsigned int cpu = smp_processor_id();
cpumask_t *mask = per_cpu(scratch_cpumask, cpu);
- cpumask_andnot(mask, pt_owner->domain_dirty_cpumask, cpumask_of(cpu));
+ cpumask_andnot(mask, pt_owner->dirty_cpumask, cpumask_of(cpu));
if ( !cpumask_empty(mask) )
flush_area_mask(mask, ZERO_BLOCK_PTR, FLUSH_VA_VALID);
}
flush_tlb_local();
break;
case UVMF_ALL:
- mask = d->domain_dirty_cpumask;
+ mask = d->dirty_cpumask;
break;
default:
mask = this_cpu(scratch_cpumask);
paging_invlpg(v, va);
break;
case UVMF_ALL:
- mask = d->domain_dirty_cpumask;
+ mask = d->dirty_cpumask;
break;
default:
mask = this_cpu(scratch_cpumask);
p2m_change_type_range(d, begin_pfn, begin_pfn + nr,
p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
memset(dirty_bitmap, 0xff, size); /* consider all pages dirty */
}
* to be read-only, or via hardware-assisted log-dirty.
*/
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
return 0;
}
* be read-only, or via hardware-assisted log-dirty.
*/
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
/************************************************/
safe_write_pte(p, new);
if ( old_flags & _PAGE_PRESENT )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
paging_unlock(d);
return;
}
- ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
+ ept_sync_domain_mask(p2m, d->dirty_cpumask);
}
static void ept_tlb_flush(struct p2m_domain *p2m)
{
- ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
+ ept_sync_domain_mask(p2m, p2m->domain->dirty_cpumask);
}
static void ept_enable_pml(struct p2m_domain *p2m)
unmap_domain_page(tab);
if ( changed )
- flush_tlb_mask(p2m->domain->domain_dirty_cpumask);
+ flush_tlb_mask(p2m->domain->dirty_cpumask);
}
static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
p2m_unlock(p2m);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
/*
}
if ( ftlb )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
return 0;
}
rc = sh_validate_guest_entry(v, gmfn, entry, size);
if ( rc & SHADOW_SET_FLUSH )
/* Need to flush TLBs to pick up shadow PT changes */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
if ( rc & SHADOW_SET_ERROR )
{
/* This page is probably not a pagetable any more: tear it out of the
/* See if that freed up enough space */
if ( d->arch.paging.shadow.free_pages >= pages )
{
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
return;
}
}
pagetable_get_mfn(v->arch.shadow_table[i]), 0);
/* Make sure everyone sees the unshadowings */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
void shadow_blow_tables_per_domain(struct domain *d)
sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
/* Before we overwrite the old contents of this page,
* we need to be sure that no TLB holds a pointer to it. */
- cpumask_copy(&mask, d->domain_dirty_cpumask);
+ cpumask_copy(&mask, d->dirty_cpumask);
tlbflush_filter(&mask, sp->tlbflush_timestamp);
if ( unlikely(!cpumask_empty(&mask)) )
{
/* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
paging_unlock(d);
}
{
sh_remove_all_shadows_and_parents(d, mfn);
if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
}
sh_remove_all_shadows_and_parents(d, omfn);
if ( sh_remove_all_mappings(d, omfn,
_gfn(gfn + (i << PAGE_SHIFT))) )
- cpumask_or(&flushmask, &flushmask,
- d->domain_dirty_cpumask);
+ cpumask_or(&flushmask, &flushmask, d->dirty_cpumask);
}
omfn = _mfn(mfn_x(omfn) + 1);
}
}
}
if ( flush_tlb )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
goto out;
out_sl1ma:
perfc_incr(shadow_rm_write_flush_tlb);
smp_wmb();
atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 2
if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
/* PAE guests have four shadow_table entries, based on the
}
}
if ( flush )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
/* Now install the new shadows. */
for ( i = 0; i < 4; i++ )
{
}
#elif GUEST_PAGING_LEVELS == 4
if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
{
}
}
if ( flush )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
/* Remember that we've seen the guest use this interface, so we
* can rely on it using it in future, instead of guessing at
mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
shadow_unhook_mappings(d, smfn, 1/* user pages only */);
/* Now flush the TLB: we removed toplevel mappings. */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
/* Remember that we've seen the guest use this interface, so we
rwlock_init(&d->vnuma_rwlock);
err = -ENOMEM;
- if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
+ if ( !zalloc_cpumask_var(&d->dirty_cpumask) )
goto fail;
if ( domcr_flags & DOMCRF_hvm )
watchdog_domain_destroy(d);
if ( init_status & INIT_xsm )
xsm_free_security_domain(d);
- free_cpumask_var(d->domain_dirty_cpumask);
+ free_cpumask_var(d->dirty_cpumask);
free_domain_struct(d);
return ERR_PTR(err);
}
radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
xsm_free_security_domain(d);
- free_cpumask_var(d->domain_dirty_cpumask);
+ free_cpumask_var(d->dirty_cpumask);
xfree(d->vcpu);
free_domain_struct(d);
static inline void gnttab_flush_tlb(const struct domain *d)
{
if ( !paging_mode_external(d) )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(d->dirty_cpumask);
}
static inline unsigned int
process_pending_softirqs();
printk("General information for domain %u:\n", d->domain_id);
- cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
+ cpuset_print(tmpstr, sizeof(tmpstr), d->dirty_cpumask);
printk(" refcnt=%d dying=%d pause_count=%d\n",
atomic_read(&d->refcnt), d->is_dying,
atomic_read(&d->pause_count));
/* Shadow translated domain: p2m mapping */
pagetable_t phys_table;
- /* Same as domain_dirty_cpumask but limited to
+ /*
+ * Same as a domain's dirty_cpumask but limited to
* this p2m and those physical cpus whose vcpu's are in
* guestmode.
*/
unsigned long vm_assist;
/* Bitmask of CPUs which are holding onto this domain's state. */
- cpumask_var_t domain_dirty_cpumask;
+ cpumask_var_t dirty_cpumask;
struct arch_domain arch;
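
For context, a minimal sketch (not part of the patch) of how the renamed field is typically used, based only on the calls visible in the hunks above: a CPU is added to the mask when it starts holding some of the domain's state, and the mask is later handed to flush_tlb_mask() when that state changes. The helper name below is hypothetical and purely illustrative.

/*
 * Illustrative sketch only, assuming the post-rename field name.
 * example_flush_domain_tlbs() is a made-up helper, not Xen code.
 */
static void example_flush_domain_tlbs(struct domain *d, unsigned int cpu)
{
    /* Record that 'cpu' may be caching some of d's state (e.g. TLB entries). */
    cpumask_set_cpu(cpu, d->dirty_cpumask);

    /* ... the domain's page tables change ... */

    /* Flush every CPU that may still hold stale translations for d. */
    flush_tlb_mask(d->dirty_cpumask);
}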