ia64/xen-unstable
changeset 15918:49700bb716bb
hvm: New HVM function hvm_set_segment_register().
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain
---|---
date | Mon Sep 17 13:33:09 2007 +0100 (2007-09-17)
parents | babe17e7a4ee
children | 35fb20c4822c
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/svm/svm.h
```diff
--- a/xen/arch/x86/hvm/svm/svm.c	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Mon Sep 17 13:33:09 2007 +0100
@@ -618,9 +618,7 @@ static void svm_sync_vmcb(struct vcpu *v
 
     arch_svm->vmcb_in_sync = 1;
 
-    asm volatile (
-        ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(arch_svm->vmcb)) );
+    svm_vmsave(arch_svm->vmcb);
 }
 
 static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
@@ -649,6 +647,7 @@ static void svm_get_segment_register(str
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
     switch ( seg )
     {
     case x86_seg_cs:
@@ -685,7 +684,58 @@ static void svm_get_segment_register(str
         svm_sync_vmcb(v);
         memcpy(reg, &vmcb->ldtr, sizeof(*reg));
         break;
-    default: BUG();
+    default:
+        BUG();
     }
 }
 
+static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                                     struct segment_register *reg)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    switch ( seg )
+    {
+    case x86_seg_cs:
+        memcpy(&vmcb->cs, reg, sizeof(*reg));
+        break;
+    case x86_seg_ds:
+        memcpy(&vmcb->ds, reg, sizeof(*reg));
+        break;
+    case x86_seg_es:
+        memcpy(&vmcb->es, reg, sizeof(*reg));
+        break;
+    case x86_seg_fs:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->fs, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_gs:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->gs, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_ss:
+        memcpy(&vmcb->ss, reg, sizeof(*reg));
+        break;
+    case x86_seg_tr:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->tr, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_gdtr:
+        memcpy(&vmcb->gdtr, reg, sizeof(*reg));
+        break;
+    case x86_seg_idtr:
+        memcpy(&vmcb->idtr, reg, sizeof(*reg));
+        break;
+    case x86_seg_ldtr:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    default:
+        BUG();
+    }
+}
+
@@ -787,10 +837,7 @@ static void svm_ctxt_switch_from(struct 
     svm_save_dr(v);
 
     svm_sync_vmcb(v);
-
-    asm volatile (
-        ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(root_vmcb[cpu])) );
+    svm_vmload(root_vmcb[cpu]);
 
 #ifdef __x86_64__
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -826,12 +873,8 @@ static void svm_ctxt_switch_to(struct vc
 
     svm_restore_dr(v);
 
-    asm volatile (
-        ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(root_vmcb[cpu])) );
-    asm volatile (
-        ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(v->arch.hvm_svm.vmcb)) );
+    svm_vmsave(root_vmcb[cpu]);
+    svm_vmload(v->arch.hvm_svm.vmcb);
 }
 
 static void svm_do_resume(struct vcpu *v)
@@ -926,6 +969,7 @@ static struct hvm_function_table svm_fun
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
+    .set_segment_register = svm_set_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
```
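Note the asymmetry among the SVM cases: fs, gs, tr and ldtr are state that VMSAVE/VMLOAD handles lazily, so the setter must pull that state back into the VMCB (svm_sync_vmcb() issues VMSAVE) before overwriting it, then push the result back into the CPU with svm_vmload(). A minimal sketch of that bracket, with svm_modify_lazy_state() invented purely for illustration:

```c
/*
 * Sketch only -- svm_modify_lazy_state() is a hypothetical helper, not
 * part of this changeset. It shows the sync/modify/reload pattern that
 * the fs/gs/tr/ldtr cases of svm_set_segment_register() follow.
 */
static void svm_modify_lazy_state(struct vcpu *v,
                                  void (*modify)(struct vmcb_struct *vmcb))
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    svm_sync_vmcb(v);  /* VMSAVE: flush the CPU's lazy copies into the VMCB */
    modify(vmcb);      /* now safe to edit the in-memory image */
    svm_vmload(vmcb);  /* VMLOAD: reload the CPU from the updated VMCB */
}
```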
```diff
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 17 13:33:09 2007 +0100
@@ -957,6 +957,79 @@ static void vmx_get_segment_register(str
         reg->attr.fields.p = 0;
 }
 
+static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                                     struct segment_register *reg)
+{
+    u16 attr;
+
+    ASSERT(v == current);
+
+    attr = reg->attr.bytes;
+    attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+
+    switch ( seg )
+    {
+    case x86_seg_cs:
+        __vmwrite(GUEST_CS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_CS_LIMIT, reg->limit);
+        __vmwrite(GUEST_CS_BASE, reg->base);
+        __vmwrite(GUEST_CS_AR_BYTES, attr);
+        break;
+    case x86_seg_ds:
+        __vmwrite(GUEST_DS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_DS_LIMIT, reg->limit);
+        __vmwrite(GUEST_DS_BASE, reg->base);
+        __vmwrite(GUEST_DS_AR_BYTES, attr);
+        break;
+    case x86_seg_es:
+        __vmwrite(GUEST_ES_SELECTOR, reg->sel);
+        __vmwrite(GUEST_ES_LIMIT, reg->limit);
+        __vmwrite(GUEST_ES_BASE, reg->base);
+        __vmwrite(GUEST_ES_AR_BYTES, attr);
+        break;
+    case x86_seg_fs:
+        __vmwrite(GUEST_FS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_FS_LIMIT, reg->limit);
+        __vmwrite(GUEST_FS_BASE, reg->base);
+        __vmwrite(GUEST_FS_AR_BYTES, attr);
+        break;
+    case x86_seg_gs:
+        __vmwrite(GUEST_GS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_GS_LIMIT, reg->limit);
+        __vmwrite(GUEST_GS_BASE, reg->base);
+        __vmwrite(GUEST_GS_AR_BYTES, attr);
+        break;
+    case x86_seg_ss:
+        __vmwrite(GUEST_SS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_SS_LIMIT, reg->limit);
+        __vmwrite(GUEST_SS_BASE, reg->base);
+        __vmwrite(GUEST_SS_AR_BYTES, attr);
+        break;
+    case x86_seg_tr:
+        __vmwrite(GUEST_TR_SELECTOR, reg->sel);
+        __vmwrite(GUEST_TR_LIMIT, reg->limit);
+        __vmwrite(GUEST_TR_BASE, reg->base);
+        __vmwrite(GUEST_TR_AR_BYTES, attr);
+        break;
+    case x86_seg_gdtr:
+        __vmwrite(GUEST_GDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_GDTR_BASE, reg->base);
+        break;
+    case x86_seg_idtr:
+        __vmwrite(GUEST_IDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_IDTR_BASE, reg->base);
+        break;
+    case x86_seg_ldtr:
+        __vmwrite(GUEST_LDTR_SELECTOR, reg->sel);
+        __vmwrite(GUEST_LDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_LDTR_BASE, reg->base);
+        __vmwrite(GUEST_LDTR_AR_BYTES, attr);
+        break;
+    default:
+        BUG();
+    }
+}
+
 /* Make sure that xen intercepts any FP accesses from current */
 static void vmx_stts(struct vcpu *v)
 {
@@ -1160,6 +1233,7 @@ static struct hvm_function_table vmx_fun
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
+    .set_segment_register = vmx_set_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
```
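The one non-obvious step on the VMX side is the attribute conversion: struct segment_register packs the 12 descriptor attribute bits contiguously, while the VMCS access-rights ("AR bytes") field keeps the low 8 bits in place, leaves bits 8-11 clear, and puts the top four attribute bits (AVL, L, D/B, G) in bits 12-15. A standalone sketch of the mapping and its inverse (the reverse shuffle of what the read side performs); the example value is an assumption for illustration:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* segment_register attr -> VMCS "AR bytes": move bits 8-11 up to 12-15. */
static uint32_t attr_to_ar_bytes(uint16_t attr)
{
    return ((attr & 0xf00) << 4) | (attr & 0xff);
}

/* VMCS "AR bytes" -> segment_register attr: the reverse shuffle. */
static uint16_t ar_bytes_to_attr(uint32_t ar)
{
    return ((ar >> 4) & 0xf00) | (ar & 0xff);
}

int main(void)
{
    /* Example value (an assumption): a 64-bit code segment --
     * type 0xb, S=1, DPL=0, P=1, L=1, G=1, i.e. attr = 0xa9b. */
    uint16_t attr = 0xa9b;
    uint32_t ar = attr_to_ar_bytes(attr);

    printf("attr=%#x -> AR bytes=%#x\n", attr, ar); /* 0xa9b -> 0xa09b */
    assert(ar_bytes_to_attr(ar) == attr);           /* lossless round trip */
    return 0;
}
```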
```diff
--- a/xen/include/asm-x86/hvm/hvm.h	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Mon Sep 17 13:33:09 2007 +0100
@@ -105,6 +105,8 @@ struct hvm_function_table {
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
+    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
+                                 struct segment_register *reg);
 
     /*
      * Re-set the value of CR3 that Xen runs on when handling VM exits.
@@ -254,6 +256,13 @@ hvm_get_segment_register(struct vcpu *v,
     hvm_funcs.get_segment_register(v, seg, reg);
 }
 
+static inline void
+hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                         struct segment_register *reg)
+{
+    hvm_funcs.set_segment_register(v, seg, reg);
+}
+
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                unsigned int *ecx, unsigned int *edx);
 void hvm_migrate_timers(struct vcpu *v);
```
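With the hook plumbed through hvm_funcs, callers elsewhere in Xen can set guest segment state without caring whether the vcpu runs on SVM or VMX. A hypothetical caller sketch (demo_load_cs and its parameters are invented for illustration, not part of this changeset):

```c
/* Hypothetical example: load a new CS for a vcpu via the vendor-neutral
 * wrapper. Starting from the current register keeps any fields the
 * caller does not specify; hvm_set_segment_register() dispatches to the
 * SVM or VMX implementation through hvm_funcs. */
static void demo_load_cs(struct vcpu *v, uint16_t sel, unsigned long base,
                         uint32_t limit, uint16_t attr)
{
    struct segment_register cs;

    hvm_get_segment_register(v, x86_seg_cs, &cs);
    cs.sel        = sel;
    cs.base       = base;
    cs.limit      = limit;
    cs.attr.bytes = attr;
    hvm_set_segment_register(v, x86_seg_cs, &cs);
}
```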
```diff
--- a/xen/include/asm-x86/hvm/svm/svm.h	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h	Mon Sep 17 13:33:09 2007 +0100
@@ -28,7 +28,7 @@
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/i387.h>
 
-extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
+void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
 
 #define SVM_REG_EAX (0)
 #define SVM_REG_ECX (1)
@@ -47,4 +47,18 @@ extern void svm_dump_vmcb(const char *fr
 #define SVM_REG_R14 (14)
 #define SVM_REG_R15 (15)
 
+static inline void svm_vmload(void *vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xda" /* vmload */
+        : : "a" (__pa(vmcb)) : "memory" );
+}
+
+static inline void svm_vmsave(void *vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdb" /* vmsave */
+        : : "a" (__pa(vmcb)) : "memory" );
+}
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
```
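The raw ".byte 0x0f,0x01,0xda/0xdb" encodings sidestep assemblers that predate SVM mnemonic support; both instructions take the physical VMCB address implicitly in rAX, hence the "a" input operand. Note the helpers also gain a "memory" clobber that the open-coded asm in svm.c lacked, since VMSAVE writes and VMLOAD reads the VMCB behind the compiler's back. On a toolchain whose assembler does know the mnemonics, an equivalent spelling would be (a sketch assuming such assembler support, not code from this changeset):

```c
/* Sketch: mnemonic form of svm_vmload(), assuming the assembler accepts
 * "vmload". Functionally identical to the ".byte 0x0f,0x01,0xda" above. */
static inline void svm_vmload_alt(void *vmcb)
{
    asm volatile ( "vmload" : : "a" (__pa(vmcb)) : "memory" );
}
```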