v->arch.pv_vcpu.need_update_runstate_area = 1;
}
-static inline int need_full_gdt(struct vcpu *v)
+static inline bool_t need_full_gdt(const struct domain *d)
{
- return (is_pv_vcpu(v) && !is_idle_vcpu(v));
+ return is_pv_domain(d) && !is_idle_domain(d);
}
static void __context_switch(void)
{
unsigned int cpu = smp_processor_id();
struct vcpu *p = per_cpu(curr_vcpu, cpu);
struct vcpu *n = current;
+ struct domain *pd = p->domain, *nd = n->domain;
struct desc_struct *gdt;
struct desc_ptr gdt_desc;
ASSERT(p != n);
ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
- if ( !is_idle_vcpu(p) )
+ if ( !is_idle_domain(pd) )
{
memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
vcpu_save_fpu(p);
p->arch.ctxt_switch_from(p);
}
/*
* Mark this CPU in next domain's dirty cpumasks before calling
* ctxt_switch_to(). This avoids a race on things like EPT flushing,
* which is synchronised on that function.
*/
- if ( p->domain != n->domain )
- cpumask_set_cpu(cpu, n->domain->domain_dirty_cpumask);
+ if ( pd != nd )
+ cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
cpumask_set_cpu(cpu, n->vcpu_dirty_cpumask);
- if ( !is_idle_vcpu(n) )
+ if ( !is_idle_domain(nd) )
{
memcpy(stack_regs, &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES);
if ( cpu_has_xsave )
n->arch.ctxt_switch_to(n);
}
- psr_ctxt_switch_to(n->domain);
+ psr_ctxt_switch_to(nd);
- gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
- per_cpu(compat_gdt_table, cpu);
- if ( need_full_gdt(n) )
+ gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) :
+ per_cpu(compat_gdt_table, cpu);
+ if ( need_full_gdt(nd) )
{
unsigned long mfn = virt_to_mfn(gdt);
- l1_pgentry_t *pl1e = gdt_ldt_ptes(n->domain, n);
+ l1_pgentry_t *pl1e = gdt_ldt_ptes(nd, n);
unsigned int i;
for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR));
}
- if ( need_full_gdt(p) &&
- ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(n)) )
+ if ( need_full_gdt(pd) &&
+ ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(nd)) )
{
gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY);
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
write_ptbase(n);
- if ( need_full_gdt(n) &&
- ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(p)) )
+ if ( need_full_gdt(nd) &&
+ ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(pd)) )
{
gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
gdt_desc.base = GDT_VIRT_START(n);
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
- if ( p->domain != n->domain )
- cpumask_clear_cpu(cpu, p->domain->domain_dirty_cpumask);
+ if ( pd != nd )
+ cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
cpumask_clear_cpu(cpu, p->vcpu_dirty_cpumask);
per_cpu(curr_vcpu, cpu) = n;
}
void context_switch(struct vcpu *prev, struct vcpu *next)
{
unsigned int cpu = smp_processor_id();
+ const struct domain *prevd = prev->domain, *nextd = next->domain;
cpumask_t dirty_mask;
ASSERT(local_irq_is_enabled());
if ( prev != next )
_update_runstate_area(prev);
- if ( is_hvm_vcpu(prev) )
+ if ( is_hvm_domain(prevd) )
{
if (prev != next)
vpmu_save(prev);
}
local_irq_disable();
set_current(next);
if ( (per_cpu(curr_vcpu, cpu) == next) ||
- (is_idle_vcpu(next) && cpu_online(cpu)) )
+ (is_idle_domain(nextd) && cpu_online(cpu)) )
{
local_irq_enable();
}
else
{
__context_switch();
- if ( is_pv_vcpu(next) &&
- (is_idle_vcpu(prev) ||
- has_hvm_container_vcpu(prev) ||
- is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
+ if ( is_pv_domain(nextd) &&
+ (is_idle_domain(prevd) ||
+ has_hvm_container_domain(prevd) ||
+ is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) )
{
uint64_t efer = read_efer();
if ( !(efer & EFER_SCE) )
write_efer(efer | EFER_SCE);
}
/* Re-enable interrupts before restoring state which may fault. */
local_irq_enable();
- if ( is_pv_vcpu(next) )
+ if ( is_pv_domain(nextd) )
{
load_LDT(next);
load_segments(next);
}
- set_cpuid_faulting(is_pv_vcpu(next) &&
- !is_control_domain(next->domain) &&
- !is_hardware_domain(next->domain));
+ set_cpuid_faulting(is_pv_domain(nextd) &&
+ !is_control_domain(nextd) &&
+ !is_hardware_domain(nextd));
}
- if (is_hvm_vcpu(next) && (prev != next) )
+ if (is_hvm_domain(nextd) && (prev != next) )
/* Must be done with interrupts enabled */
vpmu_load(next);