ia64/xen-unstable

changeset 11577:3236311a23a5

Fix vmxassist to boot Vista.

Currently, guest_linear_to_real() in vmxassist assumes the guest HVM
uses a 2-level page table when entering protected mode with PG enabled.
Vista uses a 3-level (PAE-enabled) page table, so memory addressing
goes wrong. This patch fixes it by handling all four possible cases.
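
For reference, the mode selection behind those four cases can be
summarized by the following standalone sketch. It only mirrors the case
split that the patched guest_linear_to_real() performs; the helper name
walk_levels and its return convention are illustrative, not part of the
patch:

#include <stdint.h>

#define CR0_PG  (1u << 31)
#define CR4_PSE (1u << 4)
#define CR4_PAE (1u << 5)
#define PDE_PS  (1u << 7)

/* How many page-table levels the translation walks, given the guest's
 * CR0/CR4 and the page-directory entry it hits.  0 means paging is off
 * and linear equals physical. */
static int walk_levels(uint32_t cr0, uint32_t cr4, uint64_t pde)
{
	if (!(cr0 & CR0_PG))
		return 0;	/* no paging: identity mapping */
	if (!(cr4 & CR4_PAE))
		/* non-PAE: one level for a 4MB superpage, else two */
		return ((cr4 & CR4_PSE) || (pde & PDE_PS)) ? 1 : 2;
	if (!(cr4 & CR4_PSE))
		return 3;	/* PAE, 4KB pages: PDPT -> PD -> PT */
	return 2;		/* PAE + PSE: PDPT -> PD, 2MB pages */
}

Vista takes the three-level branch, which the pre-patch code never
reached.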

Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
author Steven Hand <steven@xensource.com>
date Fri Sep 22 12:14:22 2006 +0100 (2006-09-22)
parents ad22c711ccb7
children 0e9055d69f12
files tools/firmware/vmxassist/machine.h tools/firmware/vmxassist/vm86.c
line diff
--- a/tools/firmware/vmxassist/machine.h	Fri Sep 22 11:37:31 2006 +0100
+++ b/tools/firmware/vmxassist/machine.h	Fri Sep 22 12:14:22 2006 +0100
@@ -36,6 +36,7 @@
 #define CR4_VME		(1 << 0)
 #define CR4_PVI		(1 << 1)
 #define CR4_PSE		(1 << 4)
+#define CR4_PAE		(1 << 5)
 
 #define EFLAGS_ZF	(1 << 6)
 #define EFLAGS_TF	(1 << 8)
--- a/tools/firmware/vmxassist/vm86.c	Fri Sep 22 11:37:31 2006 +0100
+++ b/tools/firmware/vmxassist/vm86.c	Fri Sep 22 12:14:22 2006 +0100
@@ -52,29 +52,74 @@ char *states[] = {
 static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
 #endif /* DEBUG */
 
+#define PDE_PS           (1 << 7)
 #define PT_ENTRY_PRESENT 0x1
 
+/* We only support access to <=4G physical memory due to 1:1 mapping */
 static unsigned
-guest_linear_to_real(unsigned long base, unsigned off)
+guest_linear_to_real(uint32_t base)
 {
-	unsigned int gcr3 = oldctx.cr3;
-	unsigned int l1_mfn;
-	unsigned int l0_mfn;
+	uint32_t gcr3 = oldctx.cr3;
+	uint64_t l2_mfn;
+	uint64_t l1_mfn;
+	uint64_t l0_mfn;
 
 	if (!(oldctx.cr0 & CR0_PG))
-		return base + off;
+		return base;
+
+	if (!(oldctx.cr4 & CR4_PAE)) {
+		l1_mfn = ((uint32_t *)gcr3)[(base >> 22) & 0x3ff];
+
+		if (oldctx.cr4 & CR4_PSE || l1_mfn & PDE_PS) {
+			/* 1 level page table */
+			l0_mfn = l1_mfn;
+			if (!(l0_mfn & PT_ENTRY_PRESENT))
+				panic("l1 entry not present\n");
 
-	l1_mfn = ((unsigned int *)gcr3)[(base >> 22) & 0x3ff ];
-	if (!(l1_mfn & PT_ENTRY_PRESENT))
-		panic("l2 entry not present\n");
-	l1_mfn = l1_mfn & 0xfffff000 ;
+			l0_mfn &= 0xffc00000;
+			return l0_mfn + (base & 0x3fffff);
+		}
+
+		if (!(l1_mfn & PT_ENTRY_PRESENT))
+			panic("l2 entry not present\n");
+
+		l1_mfn &= 0xfffff000;
+		l0_mfn = ((uint32_t *)l1_mfn)[(base >> 12) & 0x3ff];
+		if (!(l0_mfn & PT_ENTRY_PRESENT))
+			panic("l1 entry not present\n");
+		l0_mfn &= 0xfffff000;
 
-	l0_mfn = ((unsigned int *)l1_mfn)[(base >> 12) & 0x3ff];
-	if (!(l0_mfn & PT_ENTRY_PRESENT))
-		panic("l1 entry not present\n");
-	l0_mfn = l0_mfn & 0xfffff000;
+		return l0_mfn + (base & 0xfff);
+	} else if (oldctx.cr4 & CR4_PAE && !(oldctx.cr4 & CR4_PSE)) {
+		l2_mfn = ((uint64_t *)gcr3)[(base >> 30) & 0x3];
+		if (!(l2_mfn & PT_ENTRY_PRESENT))
+			panic("l3 entry not present\n");
+		l2_mfn &= 0x3fffff000ULL;
+
+		l1_mfn = ((uint64_t *)l2_mfn)[(base >> 21) & 0x1ff];
+		if (!(l1_mfn & PT_ENTRY_PRESENT))
+			panic("l2 entry not present\n");
+		l1_mfn &= 0x3fffff000ULL;
 
-	return l0_mfn + off + (base & 0xfff);
+		l0_mfn = ((uint64_t *)l1_mfn)[(base >> 12) & 0x1ff];
+		if (!(l0_mfn & PT_ENTRY_PRESENT))
+			panic("l1 entry not present\n");
+		l0_mfn &= 0x3fffff000ULL;
+
+		return l0_mfn + (base & 0xfff);
+	} else { /* oldctx.cr4 & CR4_PAE && oldctx.cr4 & CR4_PSE */
+		l1_mfn = ((uint64_t *)gcr3)[(base >> 30) & 0x3];
+		if (!(l1_mfn & PT_ENTRY_PRESENT))
+			panic("l2 entry not present\n");
+		l1_mfn &= 0x3fffff000ULL;
+
+		l0_mfn = ((uint64_t *)l1_mfn)[(base >> 21) & 0x1ff];
+		if (!(l0_mfn & PT_ENTRY_PRESENT))
+			panic("l1 entry not present\n");
+		l0_mfn &= 0x3ffe00000ULL;
+
+		return l0_mfn + (base & 0x1fffff);
+	}
 }
 
 static unsigned
@@ -95,7 +140,8 @@ address(struct regs *regs, unsigned seg,
 	    (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
 		return ((seg & 0xFFFF) << 4) + off;
 
-	entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[seg >> 3];
+	entry = ((unsigned long long *)
+                 guest_linear_to_real(oldctx.gdtr_base))[seg >> 3];
 	entry_high = entry >> 32;
 	entry_low = entry & 0xFFFFFFFF;
 
@@ -780,7 +826,8 @@ load_seg(unsigned long sel, uint32_t *ba
 		return 1;
 	}
 
-	entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[sel >> 3];
+	entry = ((unsigned long long *)
                 guest_linear_to_real(oldctx.gdtr_base))[sel >> 3];
 
 	/* Check the P bit first */
 	if (!((entry >> (15+32)) & 0x1) && sel != 0)
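
As a sanity check on the shifts and masks in the PAE 4KB branch above:
a 32-bit linear address splits into a 2-bit PDPT index (bits 31:30), a
9-bit page-directory index (bits 29:21), a 9-bit page-table index (bits
20:12), and a 12-bit page offset. A small illustrative program, with an
arbitrary example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x12345678;		/* arbitrary example address */
	unsigned pdpt_i = (base >> 30) & 0x3;	/* 0x0 */
	unsigned pd_i   = (base >> 21) & 0x1ff;	/* 0x91 */
	unsigned pt_i   = (base >> 12) & 0x1ff;	/* 0x145 */
	unsigned off    = base & 0xfff;		/* 0x678 */

	printf("PDPT %x, PD %x, PT %x, offset %x\n", pdpt_i, pd_i, pt_i, off);
	return 0;
}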