#include <public/version.h>
#include <public/memory.h>
+/* Xen command-line option to disable hardware-assisted paging */
+static int opt_hap_disabled;
+invbool_param("hap", opt_hap_disabled);
+
int hvm_enabled __read_mostly;
unsigned int opt_hvm_debug_level __read_mostly;
    hvm_funcs = *fns;
    hvm_enabled = 1;
+
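+    /* Honour any command-line override of the hardware capability. */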
+    if ( hvm_funcs.hap_supported )
+    {
+        if ( opt_hap_disabled )
+            hvm_funcs.hap_supported = 0;
+        printk("HVM: Hardware Assisted Paging %sabled\n",
+               hvm_funcs.hap_supported ? "en" : "dis");
+    }
}
void hvm_set_guest_time(struct vcpu *v, u64 gtime)
/* vmcb used for extended host state */
static void *root_vmcb[NR_CPUS] __read_mostly;
-/* hardware assisted paging bits */
-extern int opt_hap_enabled;
-
static void inline __update_guest_eip(
    struct cpu_user_regs *regs, int inst_len)
{
    .event_pending = svm_event_pending
};
-static void svm_npt_detect(void)
+static int svm_npt_detect(void)
{
    u32 eax, ebx, ecx, edx;
    /* Check CPUID for nested paging support. */
    cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
-    if ( !(edx & 1) && opt_hap_enabled )
-    {
-        printk("SVM: Nested paging is not supported by this CPU.\n");
-        opt_hap_enabled = 0;
-    }
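+    /* EDX bit 0 of CPUID leaf 0x8000000A indicates nested paging (NPT) support. */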
+    return (edx & 1);
}
int start_svm(struct cpuinfo_x86 *c)
    write_efer(read_efer() | EFER_SVME);
-    svm_npt_detect();
-
    /* Initialize the HSA for this core. */
    phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
    phys_hsa_lo = (u32) phys_hsa;
    setup_vmcb_dump();
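+    /* Record whether this CPU supports nested paging in the HVM function table. */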
+    svm_function_table.hap_supported = svm_npt_detect();
+
    hvm_enable(&svm_function_table);
-    if ( opt_hap_enabled )
-        printk("SVM: Nested paging enabled.\n");
-
    return 1;
}
#include <asm/guest_access.h>
#include <xsm/xsm.h>
-/* Xen command-line option to enable hardware-assisted paging */
-int opt_hap_enabled;
-boolean_param("hap", opt_hap_enabled);
+#define hap_enabled(d) (hvm_funcs.hap_supported && is_hvm_domain(d))
/* Printouts */
#define PAGING_PRINTK(_f, _a...) \
    shadow_domain_init(d);
    /* ... but we will use hardware assistance if it's available. */
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
        hap_domain_init(d);
}
/* vcpu paging struct initialization goes here */
void paging_vcpu_init(struct vcpu *v)
{
-    if ( opt_hap_enabled && is_hvm_vcpu(v) )
+    if ( hap_enabled(v->domain) )
        hap_vcpu_init(v);
    else
        shadow_vcpu_init(v);
}
/* Here, dispatch domctl to the appropriate paging code */
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
        return hap_domctl(d, sc, u_domctl);
    else
        return shadow_domctl(d, sc, u_domctl);
/* Call when destroying a domain */
void paging_teardown(struct domain *d)
{
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
        hap_teardown(d);
    else
        shadow_teardown(d);
/* Call once all of the references to the domain have gone away */
void paging_final_teardown(struct domain *d)
{
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
        hap_final_teardown(d);
    else
        shadow_final_teardown(d);
 * creation. */
int paging_enable(struct domain *d, u32 mode)
{
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
        return hap_enable(d, mode | PG_HAP_enable);
    else
        return shadow_enable(d, mode | PG_SH_enable);
struct hvm_function_table {
    char *name;
+    /* Support Hardware-Assisted Paging? */
+    int hap_supported;
+
    /*
     * Initialise/destroy HVM domain/vcpu resources
     */