ia64/xen-unstable

changeset 4588:b1cb9f7f34f9

bitkeeper revision 1.1336 (42662e0bB62AMXvz0XCbYomA5_OHAw)

[PATCH] x86-64-enable-vmx.patch

Enable CONFIG_VMX for x86_64.

- Provides VM exit/entry handling code based on the x86_32 implementation
- Fixes find_highest_vector for 64-bit (Benjamin Liu)
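
A minimal standalone sketch of the arithmetic behind the second bullet (not
part of the patch; fls32 and highest_vector are hypothetical names): the 256
pending-interrupt bits live in eight 32-bit words, and with a 1-based
find-last-set (the convention of the generic_fls added below) the highest
pending vector in word w is 32*w + fls(word) - 1.

    #include <stdio.h>

    /* 1-based position of the highest set bit, 0 if none,
     * matching the convention of generic_fls in the patch. */
    static int fls32(unsigned int x)
    {
        int r = 0;
        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    static int highest_vector(const unsigned int pintr[8])
    {
        int w;
        for (w = 7; w >= 0; w--)
            if (pintr[w])
                return 32 * w + fls32(pintr[w]) - 1; /* 0-based vector */
        return -1; /* nothing pending */
    }

    int main(void)
    {
        unsigned int pintr[8] = { 0 };
        pintr[7] = 0x80000000u;                /* vector 255 pending */
        printf("%d\n", highest_vector(pintr)); /* prints 255 */
        return 0;
    }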

Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author arun.sharma@intel.com[kaf24]
date Wed Apr 20 10:25:15 2005 +0000 (2005-04-20)
parents 65d78e532664
children 6b0edacf8b40
files xen/arch/x86/Makefile xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_64/entry.S xen/include/asm-x86/config.h
line diff
     1.1 --- a/xen/arch/x86/Makefile	Wed Apr 20 10:24:53 2005 +0000
     1.2 +++ b/xen/arch/x86/Makefile	Wed Apr 20 10:25:15 2005 +0000
     1.3 @@ -7,10 +7,6 @@ OBJS += $(patsubst %.c,%.o,$(wildcard mt
     1.4  
     1.5  OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
     1.6  
     1.7 -ifneq ($(TARGET_SUBARCH),x86_32)
     1.8 -OBJS := $(patsubst vmx%.o,,$(OBJS))
     1.9 -endif
    1.10 -
    1.11  ifneq ($(crash_debug),y)
    1.12  OBJS := $(patsubst cdb%.o,,$(OBJS))
    1.13  endif
     2.1 --- a/xen/arch/x86/vmx.c	Wed Apr 20 10:24:53 2005 +0000
     2.2 +++ b/xen/arch/x86/vmx.c	Wed Apr 20 10:25:15 2005 +0000
     2.3 @@ -959,7 +959,12 @@ asmlinkage void load_cr2(void)
     2.4      struct exec_domain *d = current;
     2.5  
     2.6      local_irq_disable();        
     2.7 +#ifdef __i386__
     2.8      asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
     2.9 +#else
    2.10 +    asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
    2.11 +#endif
    2.12 +
    2.13  }
    2.14  
    2.15  #endif /* CONFIG_VMX */
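
The hunk above duplicates the asm because "movl" hard-codes a 32-bit operand
and "movq" a 64-bit one. As an alternative (an assumption, not something this
patch does), a suffix-less "mov" with a register operand lets the assembler
infer the width, so a single line could serve both builds. A minimal sketch;
it compiles for either target but, like the original, only executes in ring 0:

    /* Hypothetical width-neutral helper; the operand size follows
     * the register the compiler picks for %0. */
    static inline void write_cr2(unsigned long val)
    {
        asm volatile("mov %0, %%cr2" : : "r" (val));
    }

The explicit #ifdef in the patch is arguably clearer about intent, at the
cost of duplicating the statement.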
     3.1 --- a/xen/arch/x86/vmx_intercept.c	Wed Apr 20 10:24:53 2005 +0000
     3.2 +++ b/xen/arch/x86/vmx_intercept.c	Wed Apr 20 10:25:15 2005 +0000
     3.3 @@ -37,7 +37,7 @@ int vmx_io_intercept(ioreq_t *p)
     3.4      struct exec_domain *d = current;
     3.5      struct vmx_handler_t *handler = &(d->arch.arch_vmx.vmx_platform.vmx_handler);
     3.6      int i;
     3.7 -    unsigned addr, offset;
     3.8 +    unsigned long addr, offset;
     3.9      for (i = 0; i < handler->num_slot; i++) {
    3.10          addr   = handler->hdl_list[i].addr;
    3.11          offset = handler->hdl_list[i].offset;
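
The widening from unsigned to unsigned long matters on LP64 targets, where
unsigned is 32 bits but addresses are 64: assigning a handler address into
unsigned silently drops the upper half. A hypothetical standalone
demonstration of the truncation the change avoids:

    #include <stdio.h>

    int main(void)
    {
        unsigned long full  = 0x1000000f0UL; /* an address above 4GiB */
        unsigned      trunc = full;          /* upper 32 bits discarded */
        printf("%lx -> %x\n", full, trunc);  /* prints 1000000f0 -> f0 */
        return 0;
    }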
     4.1 --- a/xen/arch/x86/vmx_io.c	Wed Apr 20 10:24:53 2005 +0000
     4.2 +++ b/xen/arch/x86/vmx_io.c	Wed Apr 20 10:25:15 2005 +0000
     4.3 @@ -169,6 +169,17 @@ static void set_reg_value (int size, int
     4.4          break;
     4.5      }
     4.6  }
     4.7 +#else
     4.8 +static void load_xen_regs(struct xen_regs *regs)
     4.9 +{ 
    4.10 +	/* XXX: TBD */
    4.11 +	return;
    4.12 +}
    4.13 +static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
    4.14 +{
    4.15 +	/* XXX: TBD */
    4.16 +	return;
    4.17 +}
    4.18  #endif
    4.19  
    4.20  void vmx_io_assist(struct exec_domain *ed) 
    4.21 @@ -271,7 +282,8 @@ void vmx_io_assist(struct exec_domain *e
    4.22      }
    4.23  }
    4.24  
    4.25 -static inline int __fls(unsigned long word)
    4.26 +#ifdef __i386__
    4.27 +static inline int __fls(u32 word)
    4.28  {
    4.29      int bit;
    4.30  
    4.31 @@ -280,26 +292,57 @@ static inline int __fls(unsigned long wo
    4.32              :"rm" (word));
    4.33      return word ? bit : -1;
    4.34  }
    4.35 +#else
    4.36 +#define __fls(x) 	generic_fls(x)
    4.37 +static __inline__ int generic_fls(u32 x)
    4.38 +{
    4.39 +	int r = 32;
    4.40 +
    4.41 +	if (!x)
    4.42 +		return 0;
    4.43 +	if (!(x & 0xffff0000u)) {
    4.44 +		x <<= 16;
    4.45 +		r -= 16;
    4.46 +	}
    4.47 +	if (!(x & 0xff000000u)) {
    4.48 +		x <<= 8;
    4.49 +		r -= 8;
    4.50 +	}
    4.51 +	if (!(x & 0xf0000000u)) {
    4.52 +		x <<= 4;
    4.53 +		r -= 4;
    4.54 +	}
    4.55 +	if (!(x & 0xc0000000u)) {
    4.56 +		x <<= 2;
    4.57 +		r -= 2;
    4.58 +	}
    4.59 +	if (!(x & 0x80000000u)) {
    4.60 +		x <<= 1;
    4.61 +		r -= 1;
    4.62 +	}
    4.63 +	return r;
    4.64 +}
    4.65 +#endif
    4.66  
    4.67  
    4.68  /* Simple minded Local APIC priority implementation. Fix later */
    4.69 -static __inline__ int find_highest_irq(unsigned long *pintr)
    4.70 +static __inline__ int find_highest_irq(u32 *pintr)
    4.71  {
    4.72      if (pintr[7])
    4.73 -        return __fls(pintr[7]) + (256-32*1);
    4.74 +        return __fls(pintr[7]) + (255-32*1);
    4.75      if (pintr[6])
    4.76 -        return __fls(pintr[6]) + (256-32*2);
    4.77 +        return __fls(pintr[6]) + (255-32*2);
    4.78      if (pintr[5])
    4.79 -        return __fls(pintr[5]) + (256-32*3);
    4.80 +        return __fls(pintr[5]) + (255-32*3);
    4.81      if (pintr[4])
    4.82 -        return __fls(pintr[4]) + (256-32*4);
    4.83 +        return __fls(pintr[4]) + (255-32*4);
    4.84      if (pintr[3])
    4.85 -        return __fls(pintr[3]) + (256-32*5);
    4.86 +        return __fls(pintr[3]) + (255-32*5);
    4.87      if (pintr[2])
    4.88 -        return __fls(pintr[2]) + (256-32*6);
    4.89 +        return __fls(pintr[2]) + (255-32*6);
    4.90      if (pintr[1])
    4.91 -        return __fls(pintr[1]) + (256-32*7);
    4.92 -    return __fls(pintr[0]);
    4.93 +        return __fls(pintr[1]) + (255-32*7);
    4.94 +    return (__fls(pintr[0])-1);
    4.95  }
    4.96  
    4.97  /*
    4.98 @@ -317,7 +360,7 @@ static inline int find_highest_pending_i
    4.99          domain_crash_synchronous();
   4.100      }
   4.101          
   4.102 -    return find_highest_irq(&vio->vp_intr[0]);
   4.103 +    return find_highest_irq((unsigned int *)&vio->vp_intr[0]);
   4.104  }
   4.105  
   4.106  static inline void clear_highest_bit(struct exec_domain *d, int vector)
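
generic_fls above returns the 1-based position of the most significant set
bit (0 for a zero input) by a halving search over 16-, 8-, 4-, 2- and 1-bit
strides. The 255-based offsets in find_highest_irq pair with that convention
so that, for example, bit 31 of pintr[7] maps to vector 32 + (255 - 32*1) =
255. A hypothetical test driver checking a few values:

    #include <assert.h>

    /* Copy of the patch's generic_fls, reformatted for the test. */
    static int generic_fls(unsigned int x)
    {
        int r = 32;
        if (!x) return 0;
        if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
        if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
        if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
        if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
        if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
        return r;
    }

    int main(void)
    {
        assert(generic_fls(0) == 0);
        assert(generic_fls(1) == 1);            /* bit 0 -> position 1 */
        assert(generic_fls(0x80000000u) == 32); /* bit 31 -> position 32 */
        /* find_highest_irq arithmetic for the top word: */
        assert(generic_fls(0x80000000u) + (255 - 32 * 1) == 255);
        return 0;
    }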
     5.1 --- a/xen/arch/x86/vmx_vmcs.c	Wed Apr 20 10:24:53 2005 +0000
     5.2 +++ b/xen/arch/x86/vmx_vmcs.c	Wed Apr 20 10:25:15 2005 +0000
     5.3 @@ -327,7 +327,11 @@ construct_init_vmcs_guest(execution_cont
     5.4      error |= __vmwrite(GUEST_EFLAGS, eflags);
     5.5  
     5.6      error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
     5.7 +#ifdef __i386__
     5.8      __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
     5.9 +#else
    5.10 +    __asm__ __volatile__ ("movq %%dr7, %0\n" : "=r" (dr7));
    5.11 +#endif
    5.12      error |= __vmwrite(GUEST_DR7, dr7);
    5.13      error |= __vmwrite(GUEST_VMCS0, 0xffffffff);
    5.14      error |= __vmwrite(GUEST_VMCS1, 0xffffffff);
    5.15 @@ -363,12 +367,21 @@ static inline int construct_vmcs_host(st
    5.16      host_env->idtr_base = desc.address;
    5.17      error |= __vmwrite(HOST_IDTR_BASE, host_env->idtr_base);
    5.18  
    5.19 +#ifdef __i386__
    5.20      __asm__ __volatile__ ("movl %%cr0,%0" : "=r" (crn) : );
    5.21 +#else
    5.22 +    __asm__ __volatile__ ("movq %%cr0,%0" : "=r" (crn) : );
    5.23 +#endif
    5.24 +
    5.25      host_env->cr0 = crn;
    5.26      error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
    5.27  
    5.28      /* CR3 is set in vmx_final_setup_hostos */
    5.29 +#ifdef __i386__
    5.30      __asm__ __volatile__ ("movl %%cr4,%0" : "=r" (crn) : ); 
    5.31 +#else
    5.32 +    __asm__ __volatile__ ("movq %%cr4,%0" : "=r" (crn) : ); 
    5.33 +#endif
    5.34      host_env->cr4 = crn;
    5.35      error |= __vmwrite(HOST_CR4, crn);
    5.36      error |= __vmwrite(HOST_EIP, (unsigned long) vmx_asm_vmexit_handler);
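
The cr0/cr4 reads here, and the dr7 read earlier, repeat the movl/movq
pattern from load_cr2. As with the write, a suffix-less mov would let the
assembler infer the operand size; a minimal sketch under that assumption
(read_cr0 is a hypothetical helper, not patch code, and is privileged):

    static inline unsigned long read_cr0(void)
    {
        unsigned long val;
        /* Operand size follows the register chosen for %0. */
        asm volatile("mov %%cr0, %0" : "=r" (val));
        return val;
    }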
     6.1 --- a/xen/arch/x86/x86_64/entry.S	Wed Apr 20 10:24:53 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_64/entry.S	Wed Apr 20 10:25:15 2005 +0000
     6.3 @@ -151,6 +151,139 @@ test_all_events:
     6.4          movb  $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
     6.5          jmp   test_all_events
     6.6  
     6.7 +#ifdef CONFIG_VMX
     6.8 +/*
     6.9 + * At VM exit time the processor saves the guest selectors, rsp, rip,
     6.10 + * and rflags, so we don't save them ourselves; we simply decrement
     6.11 + * the kernel stack pointer to keep the frame consistent with the
     6.12 + * stack frame at usual interruption time. The host's rflags is not
     6.13 + * saved by VMX, so we set it to a fixed value.
     6.14 + *
     6.15 + * We also need the room, especially because the orig_eax field is used
     6.16 + * by do_IRQ(). Compared with xen_regs, we skip pushing the following:
    6.17 + *   (13) u64 gs_base_user;                 
    6.18 + *   (12) u64 gs_base_kernel;                 
    6.19 + *   (11) u64 fs_base;                 
    6.20 + *   (10) u64 gs;                 
    6.21 + *   (9)  u64 fs;
    6.22 + *   (8)  u64 ds;
    6.23 + *   (7)  u64 es;
    6.24 + *               <- get_stack_bottom() (= HOST_ESP)
    6.25 + *   (6)  u64 ss;
    6.26 + *   (5)  u64 rsp;
    6.27 + *   (4)  u64 rflags;
    6.28 + *   (3)  u64 cs;
    6.29 + *   (2)  u64 rip;
    6.30 + * (2/1)  u32 entry_vector;
    6.31 + * (1/1)  u32 error_code;
    6.32 + * However, get_stack_bottom() actually returns 64 bytes before the real
    6.33 + * bottom of the stack to allow space for:
    6.34 + * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
    6.35 + */
    6.36 +#define VMX_MONITOR_RFLAGS	0x202 /* IF on */
    6.37 +#define NR_SKIPPED_REGS	6	/* See the above explanation */
    6.38 +#define VMX_SAVE_ALL_NOSEGREGS \
    6.39 +        pushq $VMX_MONITOR_RFLAGS; \
    6.40 +        popfq; \
    6.41 +        subq $(NR_SKIPPED_REGS*8), %rsp; \
    6.42 +        pushq %rdi; \
    6.43 +        pushq %rsi; \
    6.44 +        pushq %rdx; \
    6.45 +        pushq %rcx; \
    6.46 +        pushq %rax; \
    6.47 +        pushq %r8;  \
    6.48 +        pushq %r9;  \
    6.49 +        pushq %r10; \
    6.50 +        pushq %r11; \
    6.51 +        pushq %rbx; \
    6.52 +        pushq %rbp; \
    6.53 +        pushq %r12; \
    6.54 +        pushq %r13; \
    6.55 +        pushq %r14; \
    6.56 +        pushq %r15; \
    6.57 +
    6.58 +ENTRY(vmx_asm_vmexit_handler)
    6.59 +        /* selectors are restored/saved by VMX */
    6.60 +        VMX_SAVE_ALL_NOSEGREGS
    6.61 +        call SYMBOL_NAME(vmx_vmexit_handler)
    6.62 +        jmp vmx_asm_do_resume
    6.63 +
    6.64 +ENTRY(vmx_asm_do_launch)
    6.65 +        popq %r15
    6.66 +        popq %r14
    6.67 +        popq %r13
    6.68 +        popq %r12
    6.69 +        popq %rbp
    6.70 +        popq %rbx
    6.71 +        popq %r11
    6.72 +        popq %r10
    6.73 +        popq %r9
    6.74 +        popq %r8
    6.75 +        popq %rax
    6.76 +        popq %rcx
    6.77 +        popq %rdx
    6.78 +        popq %rsi
    6.79 +        popq %rdi
    6.80 +        addq $(NR_SKIPPED_REGS*8), %rsp
    6.81 +        /* VMLAUNCH */
    6.82 +        .byte 0x0f,0x01,0xc2
    6.83 +        pushfq
    6.84 +        call SYMBOL_NAME(vm_launch_fail)
    6.85 +        hlt
    6.86 +        
    6.87 +        ALIGN
    6.88 +        
    6.89 +ENTRY(vmx_asm_do_resume)
    6.90 +vmx_test_all_events:
    6.91 +        GET_CURRENT(%rbx)
    6.92 +/* test_all_events: */
    6.93 +        xorq %rcx,%rcx
    6.94 +        notq %rcx
    6.95 +        cli                             # tests must not race interrupts
    6.96 +/*test_softirqs:*/  
    6.97 +        movq EDOMAIN_processor(%rbx),%rax
    6.98 +#if 0
    6.99 +        shl  $6,%rax                    # sizeof(irq_cpustat) == 64
   6.100 +        test %rcx,SYMBOL_NAME(irq_stat)(%rax,1)
   6.101 +#endif
   6.102 +        jnz  vmx_process_softirqs
   6.103 +
   6.104 +vmx_restore_all_guest:
   6.105 +        call SYMBOL_NAME(load_cr2)
   6.106 +        /*
   6.107 +         * Check whether we are going back to a VMX-based VM.
   6.108 +         * By this time, all VMCS setup must be complete.
   6.109 +         */
   6.110 +        popq %r15
   6.111 +        popq %r14
   6.112 +        popq %r13
   6.113 +        popq %r12
   6.114 +        popq %rbp
   6.115 +        popq %rbx
   6.116 +        popq %r11
   6.117 +        popq %r10
   6.118 +        popq %r9
   6.119 +        popq %r8
   6.120 +        popq %rax
   6.121 +        popq %rcx
   6.122 +        popq %rdx
   6.123 +        popq %rsi
   6.124 +        popq %rdi
   6.125 +        addq $(NR_SKIPPED_REGS*8), %rsp
   6.126 +        /* VMRESUME */
   6.127 +        .byte 0x0f,0x01,0xc3
   6.128 +        pushfq
   6.129 +        call SYMBOL_NAME(vm_resume_fail)
   6.130 +        /* Should never reach here */
   6.131 +        hlt
   6.132 +
   6.133 +        ALIGN
   6.134 +vmx_process_softirqs:
   6.135 +        sti       
   6.136 +        call SYMBOL_NAME(do_softirq)
   6.137 +        jmp  vmx_test_all_events
   6.138 +#endif
   6.139 +
   6.140          ALIGN
   6.141  /* %rbx: struct exec_domain */
   6.142  process_softirqs:
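
Two details of the new entry.S stubs are worth spelling out. First, the frame
arithmetic: subq $(NR_SKIPPED_REGS*8) reserves 6*8 = 48 bytes, exactly the
six u64 slots listed below get_stack_bottom() in the comment (ss, rsp,
rflags, cs, rip, plus the u32 pair entry_vector/error_code). The guest values
for those fields live in the VMCS, not on the stack; the slots exist only so
the frame lines up with xen_regs. A hypothetical struct mirroring that area,
lowest address first:

    #include <stdint.h>

    struct vmx_skipped_frame {          /* not patch code; illustration */
        uint32_t error_code;            /* (1/1) */
        uint32_t entry_vector;          /* (2/1) shares a u64 slot */
        uint64_t rip;                   /* (2) */
        uint64_t cs;                    /* (3) */
        uint64_t rflags;                /* (4) */
        uint64_t rsp;                   /* (5) */
        uint64_t ss;                    /* (6) */
    };

    _Static_assert(sizeof(struct vmx_skipped_frame) == 48,
                   "must equal NR_SKIPPED_REGS * 8");

Second, the raw .byte 0x0f,0x01,0xc2 and .byte 0x0f,0x01,0xc3 sequences are
the opcodes for VMLAUNCH and VMRESUME, hand-encoded because assemblers of
the period did not recognize the VMX mnemonics.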
     7.1 --- a/xen/include/asm-x86/config.h	Wed Apr 20 10:24:53 2005 +0000
     7.2 +++ b/xen/include/asm-x86/config.h	Wed Apr 20 10:25:15 2005 +0000
     7.3 @@ -7,9 +7,7 @@
     7.4  #ifndef __X86_CONFIG_H__
     7.5  #define __X86_CONFIG_H__
     7.6  
     7.7 -#ifdef __i386__
     7.8  #define CONFIG_VMX 1
     7.9 -#endif
    7.10  
    7.11  #define CONFIG_X86 1
    7.12  #define CONFIG_SHADOW 1