}
}
-bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
- void *ctxt)
-{
- static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
- cpumask_t *mask = &this_cpu(flush_cpumask);
- struct domain *d = current->domain;
- struct vcpu *v;
-
- /* Avoid deadlock if more than one vcpu tries this at the same time. */
- if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return false;
-
- /* Pause all other vcpus. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_pause_nosync(v);
-
- /* Now that all VCPUs are signalled to deschedule, we wait... */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
-
- /* All other vcpus are paused, safe to unlock now. */
- spin_unlock(&d->hypercall_deadlock_mutex);
-
- cpumask_clear(mask);
-
- /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
- for_each_vcpu ( d, v )
- {
- unsigned int cpu;
-
- if ( !flush_vcpu(ctxt, v) )
- continue;
-
- paging_update_cr3(v, false);
-
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- /* Flush TLBs on all CPUs with dirty vcpu state. */
- flush_tlb_mask(mask);
-
- /* Done. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_unpause(v);
-
- return true;
-}
-
static bool always_flush(void *ctxt, struct vcpu *v)
{
return true;
if ( !is_hvm_domain(current->domain) )
return -EINVAL;
- return hvm_flush_vcpu_tlb(always_flush, NULL) ? 0 : -ERESTART;
+ return paging_flush_tlb(always_flush, NULL) ? 0 : -ERESTART;
}
static int hvmop_set_evtchn_upcall_vector(
* A false return means that another vcpu is currently trying
* a similar operation, so back off.
*/
- if ( !hvm_flush_vcpu_tlb(need_flush, &input_params.vcpu_mask) )
+ if ( !paging_flush_tlb(need_flush, &input_params.vcpu_mask) )
return HVM_HCALL_preempted;
output.rep_complete = input.rep_count;
hvm_update_guest_cr3(v, noflush);
}
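+/* Flush the TLB of the selected vCPUs. */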
+static bool flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
.update_cr3 = hap_update_cr3,
.update_paging_modes = hap_update_paging_modes,
.write_p2m_entry = hap_write_p2m_entry,
+ .flush_tlb = flush_tlb,
.guest_levels = 1
};
.update_cr3 = hap_update_cr3,
.update_paging_modes = hap_update_paging_modes,
.write_p2m_entry = hap_write_p2m_entry,
+ .flush_tlb = flush_tlb,
.guest_levels = 2
};
.update_cr3 = hap_update_cr3,
.update_paging_modes = hap_update_paging_modes,
.write_p2m_entry = hap_write_p2m_entry,
+ .flush_tlb = flush_tlb,
.guest_levels = 3
};
.update_cr3 = hap_update_cr3,
.update_paging_modes = hap_update_paging_modes,
.write_p2m_entry = hap_write_p2m_entry,
+ .flush_tlb = flush_tlb,
.guest_levels = 4
};
return rc;
}
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
.update_cr3 = sh_update_cr3,
.update_paging_modes = shadow_update_paging_modes,
.write_p2m_entry = shadow_write_p2m_entry,
+ .flush_tlb = shadow_flush_tlb,
.guest_levels = GUEST_PAGING_LEVELS,
.shadow.detach_old_tables = sh_detach_old_tables,
#ifdef CONFIG_PV
((count & PGC_allocated) ? 1 : 0) );
}
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#endif /* _XEN_SHADOW_PRIVATE_H */
/*
signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
-bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
- void *ctxt);
-
int hvm_copy_context_and_params(struct domain *src, struct domain *dst);
#ifdef CONFIG_HVM
unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level);
+ bool (*flush_tlb )(bool (*flush_vcpu)(void *ctxt,
+ struct vcpu *v),
+ void *ctxt);
unsigned int guest_levels;
return bits;
}
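+/* Flush the TLB of the selected vCPUs via the current host paging mode. */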
+static inline bool paging_flush_tlb(bool (*flush_vcpu)(void *ctxt,
+ struct vcpu *v),
+ void *ctxt)
+{
+ return paging_get_hostmode(current)->flush_tlb(flush_vcpu, ctxt);
+}
+
#endif /* XEN_PAGING_H */
/*