
diff xen/arch/x86/hvm/svm/svm.c @ 10892:0d2ba35c0cf2

[XEN] Add hypercall support for HVM guests. This is
fairly useless at the moment, since all of the hypercalls
fail: copy_from_user doesn't work correctly in HVM
domains.

Signed-off-by: Steven Smith <ssmith@xensource.com>
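
For illustration, here is a minimal sketch of how a 32-bit HVM guest
could invoke a hypercall through the transfer page once it has been
installed. The hypercall_page symbol and hypercall2 helper are
hypothetical, and the register ABI (hypercall number loaded into %eax
by the stub, arguments in %ebx/%ecx) is the conventional 32-bit Xen
one, assumed here rather than defined by this patch:

    /* Hypothetical guest-side helper; each hypercall nr has a 32-byte
     * stub at hypercall_page + nr * 32 (see svm_init_hypercall_page
     * in the diff below). */
    extern char hypercall_page[];

    static inline long hypercall2(unsigned int nr, unsigned long a1,
                                  unsigned long a2)
    {
        long ret;
        asm volatile ( "call *%[stub]"
                       : "=a" (ret), "+b" (a1), "+c" (a2)
                       : [stub] "r" (hypercall_page + (nr * 32))
                       : "memory" );
        return ret;
    }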

Add a CPUID hypervisor platform interface at leaf
0x40000000. Allow the hypercall transfer page to be filled
in via MSR 0x40000000.

Signed-off-by: Keir Fraser <keir@xensource.com>
author   kfraser@localhost.localdomain
date     Tue Aug 01 17:18:05 2006 +0100
parents  7137825805c7
children 2e3b121662dc
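
A guest would discover the interface and install the transfer page
roughly as follows. This is a sketch only: guest_virt_to_phys() is a
hypothetical helper, and the exact encoding of the MSR payload (taken
here to be the guest physical address of the page) is an assumption,
since this changeset shows only the SVM side of the interface:

    #include <stdint.h>

    #define HV_CPUID_BASE 0x40000000  /* CPUID leaf from the commit message */
    #define HV_MSR_BASE   0x40000000  /* MSR index from the commit message */

    static uint8_t hypercall_page[4096] __attribute__((aligned(4096)));

    extern uint64_t guest_virt_to_phys(const void *va); /* hypothetical */

    static void setup_hypercall_page(void)
    {
        uint32_t eax, ebx, ecx, edx;
        uint64_t gpa = guest_virt_to_phys(hypercall_page);

        /* Probe the hypervisor platform leaf: eax returns the highest
         * supported leaf, ebx/ecx/edx carry the hypervisor signature. */
        asm volatile ( "cpuid"
                       : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                       : "a" (HV_CPUID_BASE) );

        /* Ask the hypervisor to fill the page with hypercall stubs;
         * wrmsr takes the index in %ecx and the value in %edx:%eax. */
        asm volatile ( "wrmsr"
                       :: "c" (HV_MSR_BASE),
                          "a" ((uint32_t)gpa),
                          "d" ((uint32_t)(gpa >> 32)) );
    }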
--- a/xen/arch/x86/hvm/svm/svm.c    Tue Aug 01 15:48:48 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c    Tue Aug 01 17:18:05 2006 +0100
@@ -456,6 +456,28 @@ void svm_init_ap_context(struct vcpu_gue
     ctxt->flags = VGCF_HVM_GUEST;
 }
 
+static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
+{
+    char *p;
+    int i;
+
+    memset(hypercall_page, 0, PAGE_SIZE);
+
+    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    {
+        p = (char *)(hypercall_page + (i * 32));
+        *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
+        *(u32 *)(p + 1) = i;
+        *(u8  *)(p + 5) = 0x0f; /* vmmcall */
+        *(u8  *)(p + 6) = 0x01;
+        *(u8  *)(p + 7) = 0xd9;
+        *(u8  *)(p + 8) = 0xc3; /* ret */
+    }
+
+    /* Don't support HYPERVISOR_iret at the moment */
+    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
+}
+
 int start_svm(void)
 {
     u32 eax, ecx, edx;
@@ -504,6 +526,8 @@ int start_svm(void)
     hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
     hvm_funcs.init_ap_context = svm_init_ap_context;
 
+    hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
+
     hvm_enabled = 1;
 
     return 1;
@@ -1980,11 +2004,13 @@ static int svm_cr_access(struct vcpu *v,
     return result;
 }
 
-static inline void svm_do_msr_access(struct vcpu *v, struct cpu_user_regs *regs)
+static inline void svm_do_msr_access(
+    struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int  inst_len;
     u64 msr_content=0;
+    u32 eax, edx;
 
     ASSERT(vmcb);
 
@@ -2018,6 +2044,14 @@ static inline void svm_do_msr_access(str
         default:
             if (long_mode_do_msr_read(regs))
                 goto done;
+
+            if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
+            {
+                regs->eax = eax;
+                regs->edx = edx;
+                goto done;
+            }
+
             rdmsr_safe(regs->ecx, regs->eax, regs->edx);
             break;
         }
@@ -2047,7 +2081,8 @@ static inline void svm_do_msr_access(str
             vlapic_msr_set(VLAPIC(v), msr_content);
             break;
         default:
-            long_mode_do_msr_write(regs);
+            if ( !long_mode_do_msr_write(regs) )
+                wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
             break;
         }
     }
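
For reference, the bytes that svm_init_hypercall_page() writes decode
to the following stub for each hypercall nr:

    /*
     * Offset nr * 32 in the hypercall page:
     *
     *   b8 nn nn nn nn    mov  $nr, %eax    ; hypercall number
     *   0f 01 d9          vmmcall           ; trap into Xen (SVM)
     *   c3                ret
     *
     * The slot for __HYPERVISOR_iret is instead overwritten to begin
     * with 0f 0b (ud2), so a guest that calls it takes an invalid-
     * opcode fault rather than issuing an unsupported hypercall.
     */

Note also the ordering on the MSR paths: hypervisor-range accesses
reach rdmsr_hypervisor_regs()/wrmsr_hypervisor_regs() only after
long_mode_do_msr_read()/long_mode_do_msr_write() decline the MSR, so
architectural MSRs keep priority over the 0x40000000 range.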