    else if ( using_svm() )
        fns = start_svm();

+    if ( fns )
+        hvm_funcs = *fns;
+
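+    /*
+     * Give each compiled-in vendor a chance to fill hooks left at NULL in
+     * its own table, purely so __initdata_cf_clobber covers as many
+     * functions as possible.  Deliberately after the copy above, so none
+     * of the extra hooks leak into hvm_funcs.
+     */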
+    if ( IS_ENABLED(CONFIG_INTEL_VMX) )
+        vmx_fill_funcs();
+    if ( IS_ENABLED(CONFIG_AMD_SVM) )
+        svm_fill_funcs();
+
    if ( fns == NULL )
        return 0;

-    hvm_funcs = *fns;
    hvm_enabled = 1;

    printk("HVM: %s enabled\n", fns->name);
    return &svm_function_table;
}

+void __init svm_fill_funcs(void)
+{
+    /*
+     * Now that svm_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
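+    /* Without CET-IBT there are no ENDBR instructions to clobber. */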
+    if ( !cpu_has_xen_ibt )
+        return;
+
+    /* Nothing at present. */
+}
+
void asmlinkage svm_vmexit_handler(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    return &vmx_function_table;
}

+void __init vmx_fill_funcs(void)
+{
+    /*
+     * Now that vmx_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
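+    /* Without CET-IBT there are no ENDBR instructions to clobber. */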
+    if ( !cpu_has_xen_ibt )
+        return;
+
+    vmx_function_table.set_descriptor_access_exiting =
+        vmx_set_descriptor_access_exiting;
+
+    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
+    vmx_function_table.process_isr = vmx_process_isr;
+    vmx_function_table.handle_eoi = vmx_handle_eoi;
+
+    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
+
+    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
+    vmx_function_table.sync_pir_to_irr = vmx_sync_pir_to_irr;
+    vmx_function_table.test_pir = vmx_test_pir;
+}
+
/*
 * Not all cases receive valid value in the VM-exit instruction length field.
 * Callers must know what they're doing!
extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);
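+/* Fill NULL hooks in the vendor tables, for __initdata_cf_clobber. */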
+void svm_fill_funcs(void);
+void vmx_fill_funcs(void);
+
int hvm_domain_initialise(struct domain *d,
                          const struct xen_domctl_createdomain *config);
void hvm_domain_relinquish_resources(struct domain *d);