void (*free_page)(struct domain *d, struct page_info *pg);
void (*update_paging_modes)(struct vcpu *v);
+
+#ifdef CONFIG_HVM
+ /* Flush selected vCPUs' TLBs. NULL for all. */
+ bool __must_check (*flush_tlb)(const unsigned long *vcpu_bitmap);
+#endif
};
struct paging_vcpu {
#endif
void (*update_cr3 )(struct vcpu *v, int do_locking,
bool noflush);
- bool (*flush_tlb )(const unsigned long *vcpu_bitmap);
unsigned int guest_levels;
page_order);
}
+/* Flush selected vCPUs' TLBs. NULL for all. */
+static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
+{
+ return current->domain->arch.paging.flush_tlb(vcpu_bitmap);
+}
+
#endif /* CONFIG_HVM */
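
As an aside, here is an illustrative caller of the new wrapper (not part of the patch; the function name is hypothetical). With the hook now hanging off struct paging_domain rather than the per-vCPU paging mode, a flush of every vCPU's TLB can be requested simply by passing NULL:

    /*
     * Hypothetical example, for illustration only: flush the TLBs of all
     * vCPUs of the current (HVM) domain through the new per-domain hook.
     * paging_flush_tlb() returns false if the flush could not be carried
     * out right now, in which case a real caller would typically retry.
     */
    static int hvmop_flush_all_example(void)
    {
        if ( !is_hvm_domain(current->domain) )
            return -EINVAL;

        /* A NULL bitmap means "flush every vCPU". */
        return paging_flush_tlb(NULL) ? 0 : -ERESTART;
    }
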
/* Update all the things that are derived from the guest's CR3.
return bits;
}
-/* Flush selected vCPUs TLBs. NULL for all. */
-static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
-{
- return paging_get_hostmode(current)->flush_tlb(vcpu_bitmap);
-}
-
#endif /* XEN_PAGING_H */
/*
/************************************************/
static void cf_check hap_update_paging_modes(struct vcpu *v);
+static bool cf_check flush_tlb(const unsigned long *vcpu_bitmap);
void hap_domain_init(struct domain *d)
{
paging_log_dirty_init(d, &hap_ops);
d->arch.paging.update_paging_modes = hap_update_paging_modes;
+ d->arch.paging.flush_tlb = flush_tlb;
}
/* return 0 for success, -errno for failure */
.gva_to_gfn = hap_gva_to_gfn_real_mode,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_real_mode,
.update_cr3 = hap_update_cr3,
- .flush_tlb = flush_tlb,
.guest_levels = 1
};
.gva_to_gfn = hap_gva_to_gfn_2_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_2_levels,
.update_cr3 = hap_update_cr3,
- .flush_tlb = flush_tlb,
.guest_levels = 2
};
.gva_to_gfn = hap_gva_to_gfn_3_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_3_levels,
.update_cr3 = hap_update_cr3,
- .flush_tlb = flush_tlb,
.guest_levels = 3
};
.gva_to_gfn = hap_gva_to_gfn_4_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_4_levels,
.update_cr3 = hap_update_cr3,
- .flush_tlb = flush_tlb,
.guest_levels = 4
};
d->arch.paging.shadow.oos_active = 0;
#endif
#ifdef CONFIG_HVM
+ d->arch.paging.flush_tlb = shadow_flush_tlb;
d->arch.paging.shadow.pagetable_dying_op = 0;
#endif
paging_unlock(d);
}
-
-static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
-{
- return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
-}
-
-/* Flush TLB of selected vCPUs. NULL for all. */
-bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap)
-{
- static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
- cpumask_t *mask = &this_cpu(flush_cpumask);
- struct domain *d = current->domain;
- struct vcpu *v;
-
- /* Avoid deadlock if more than one vcpu tries this at the same time. */
- if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return false;
-
- /* Pause all other vcpus. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(v, vcpu_bitmap) )
- vcpu_pause_nosync(v);
-
- /* Now that all VCPUs are signalled to deschedule, we wait... */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(v, vcpu_bitmap) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
-
- /* All other vcpus are paused, safe to unlock now. */
- spin_unlock(&d->hypercall_deadlock_mutex);
-
- cpumask_clear(mask);
-
- /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
- for_each_vcpu ( d, v )
- {
- unsigned int cpu;
-
- if ( !flush_vcpu(v, vcpu_bitmap) )
- continue;
-
- paging_update_cr3(v, false);
-
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- /* Flush TLBs on all CPUs with dirty vcpu state. */
- guest_flush_tlb_mask(d, mask);
-
- /* Done. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(v, vcpu_bitmap) )
- vcpu_unpause(v);
-
- return true;
-}
-
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}
+static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
+{
+ return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
+}
+
+/* Flush TLB of selected vCPUs. NULL for all. */
+bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ const struct vcpu *curr = current;
+ struct domain *d = curr->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(v, vcpu_bitmap) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ guest_flush_tlb_mask(d, mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
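
For completeness, a sketch of a selective flush through the same entry point (again not taken from the patch; the helper name and the 128-bit bitmap sizing are assumptions). This is the case that shadow_flush_tlb() above services for shadow-mode domains, skipping vCPUs whose bit is clear via flush_vcpu():

    /*
     * Illustrative sketch: flush only vCPUs 0 and 2 of the current domain.
     * The bitmap is indexed by vcpu_id.
     */
    static int flush_two_vcpus_example(void)
    {
        DECLARE_BITMAP(vcpu_bitmap, 128) = {};

        __set_bit(0, vcpu_bitmap);
        __set_bit(2, vcpu_bitmap);

        return paging_flush_tlb(vcpu_bitmap) ? 0 : -ERESTART;
    }
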
mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
{
struct domain *d = v->domain;
.gva_to_gfn = sh_gva_to_gfn,
#endif
.update_cr3 = sh_update_cr3,
- .flush_tlb = shadow_flush_tlb,
.guest_levels = GUEST_PAGING_LEVELS,
#ifdef CONFIG_PV
.shadow.write_guest_entry = sh_write_guest_entry,