}
#ifdef CONFIG_HVM
-static int vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_vmce_vcpu ctxt = {
.caps = v->arch.vmce.mcg_cap,
return hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
}
-static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int cf_check vmce_load_vcpu_ctxt(
+    struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
spin_unlock(&pit->lock);
}
-static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check pit_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
PITState *pit = domain_vpit(d);
return rc;
}
-static int pit_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check pit_load(struct domain *d, hvm_domain_context_t *h)
{
PITState *pit = domain_vpit(d);
int i, rc = 0;
};
-static int hpet_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hpet_save(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
HPETState *hp = domain_vhpet(d);
return rc;
}
-static int hpet_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hpet_load(struct domain *d, hvm_domain_context_t *h)
{
HPETState *hp = domain_vhpet(d);
struct hvm_hw_hpet *rec;
destroy_vpci_mmcfg(d);
}
-static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_tsc_adjust ctxt = {
.tsc_adjust = v->arch.hvm.msr_tsc_adjust,
return hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
}
-static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hvm_load_tsc_adjust(
+    struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
-static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
struct segment_register seg;
struct hvm_hw_cpu ctxt = {
(cet ? X86_CR4_CET : 0));
}
-static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
save_area) + \
xstate_ctxt_size(xcr0))
-static int hvm_save_cpu_xsave_states(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hvm_save_cpu_xsave_states(
+ struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_hw_cpu_xsave *ctxt;
unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
#undef compat_xsave_hdr
#undef xen_xsave_hdr
-static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hvm_load_cpu_xsave_states(
+ struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid, size;
int err;
MSR_AMD64_DR3_ADDRESS_MASK,
};
-static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
return 0;
}
-static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
{
unsigned int i, vcpuid = hvm_load_instance(h);
struct vcpu *v;
}
__initcall(dump_irq_info_key_init);
-static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
return rc;
}
-static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
return hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq);
}
-static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
return hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link);
}
-static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
+static int cf_check irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
int link, dev, intx, gsi;
return 0;
}
-static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
+static int cf_check irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
int irq;
}
-static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
+static int cf_check irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
int link, gsi;
return 0;
}
-static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
{
const struct mtrr_state *mtrr_state = &v->arch.hvm.mtrr;
struct hvm_hw_mtrr hw_mtrr = {
return hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr);
}
-static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+static int cf_check hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid, i;
struct vcpu *v;
return X86EMUL_OKAY;
}
-static int acpi_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check acpi_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
return rc;
}
-static int acpi_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check acpi_load(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
PMTState *s = &d->arch.hvm.pl_time->vpmt;
}
/* Save RTC hardware state */
-static int rtc_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check rtc_save(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
RTCState *s = domain_vrtc(d);
}
/* Reload the hardware state from a saved domain */
-static int rtc_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check rtc_load(struct domain *d, hvm_domain_context_t *h)
{
RTCState *s = domain_vrtc(d);
return vioapic->redirtbl[pin].fields.trig_mode;
}
-static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
struct hvm_vioapic *s;
return hvm_save_entry(IOAPIC, 0, h, &s->domU);
}
-static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_vioapic *s;
put_page_and_type(page);
}
-static int viridian_save_domain_ctxt(struct vcpu *v,
- hvm_domain_context_t *h)
+static int cf_check viridian_save_domain_ctxt(
+ struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
const struct viridian_domain *vd = d->arch.hvm.viridian;
return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
}
-static int viridian_load_domain_ctxt(struct domain *d,
- hvm_domain_context_t *h)
+static int cf_check viridian_load_domain_ctxt(
+ struct domain *d, hvm_domain_context_t *h)
{
struct viridian_domain *vd = d->arch.hvm.viridian;
struct hvm_viridian_domain_context ctxt;
HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
-static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check viridian_save_vcpu_ctxt(
+ struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_viridian_vcpu_context ctxt = {};
return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt);
}
-static int viridian_load_vcpu_ctxt(struct domain *d,
- hvm_domain_context_t *h)
+static int cf_check viridian_load_vcpu_ctxt(
+ struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
s->timer_last_update = s->pt.last_plt_gtime;
}
-static int lapic_save_hidden(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check lapic_save_hidden(struct vcpu *v, hvm_domain_context_t *h)
{
if ( !has_vlapic(v->domain) )
return 0;
return hvm_save_entry(LAPIC, v->vcpu_id, h, &vcpu_vlapic(v)->hw);
}
-static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
{
if ( !has_vlapic(v->domain) )
return 0;
}
}
-static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
+static int cf_check lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
return 0;
}
-static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
+static int cf_check lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
unsigned int vcpuid = hvm_load_instance(h);
struct vcpu *v;
return X86EMUL_OKAY;
}
-static int vpic_save(struct vcpu *v, hvm_domain_context_t *h)
+static int cf_check vpic_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
struct hvm_hw_vpic *s;
return 0;
}
-static int vpic_load(struct domain *d, hvm_domain_context_t *h)
+static int cf_check vpic_load(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_hw_vpic *s;
unsigned int inst = hvm_load_instance(h);