ia64/xen-unstable

changeset 11130:155c11be3b89

[VMX] Fix vmxassist for booting Vista.

This patch fixes 2 vmxassist bugs to boot Vista:
1) When switching from VM86_REAL_TO_PROTECTED to VM86_PROTECTED,
protected_mode() sets return eip to switch_to_protected_mode and
segments to vmxassist's. But in the later set_mode(), it still considers
these segments to be the guest's (the BIOS's at this time) and uses its
GDT table to calculate address(), which results in a segment limit fault. 2) In
the current vmxassist, while in VM86_PROTECTED_TO_REAL mode we must
decode every instruction manually until we encounter an instruction,
such as jmpl, that resets %cs and actually returns to real mode. Vista's
boot manager uses more instructions than were previously emulated, so
additional opcodes must be handled.

Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
author kfraser@localhost.localdomain
date Tue Aug 15 10:45:05 2006 +0100 (2006-08-15)
parents 651d2d5d2856
children 9ced76fd7d9b
files tools/firmware/vmxassist/vm86.c
line diff
     1.1 --- a/tools/firmware/vmxassist/vm86.c	Tue Aug 15 10:43:43 2006 +0100
     1.2 +++ b/tools/firmware/vmxassist/vm86.c	Tue Aug 15 10:45:05 2006 +0100
     1.3 @@ -983,7 +983,9 @@ set_mode(struct regs *regs, enum vm86_mo
     1.4  	case VM86_PROTECTED:
     1.5  		if (mode == VM86_REAL_TO_PROTECTED) {
     1.6  			protected_mode(regs);
     1.7 -			break;
     1.8 +//			printf("<VM86_PROTECTED>\n");
     1.9 +			mode = newmode;
    1.10 +			return;
    1.11  		} else
    1.12  			panic("unexpected protected mode transition");
    1.13  		break;
    1.14 @@ -1170,6 +1172,26 @@ inbyte(struct regs *regs, unsigned prefi
    1.15  	return 1;
    1.16  }
    1.17  
    1.18 +static void
    1.19 +pushrm(struct regs *regs, int prefix, unsigned modrm)
    1.20 +{
    1.21 +	unsigned n = regs->eip;
    1.22 +	unsigned addr;
    1.23 +	unsigned data;
    1.24 +
    1.25 +	addr  = operand(prefix, regs, modrm);
    1.26 +	
    1.27 +	if (prefix & DATA32) {
    1.28 +		data = read32(addr);
    1.29 +		push32(regs, data);
    1.30 +	} else {
    1.31 +		data = read16(addr);
    1.32 +		push16(regs, data);
    1.33 +	}
    1.34 +
    1.35 +	TRACE((regs, (regs->eip - n) + 1, "push *0x%x", addr));
    1.36 +}
    1.37 +
    1.38  enum { OPC_INVALID, OPC_EMULATED };
    1.39  
    1.40  /*
    1.41 @@ -1186,6 +1208,14 @@ opcode(struct regs *regs)
    1.42  
    1.43  	for (;;) {
    1.44  		switch ((opc = fetch8(regs))) {
    1.45 +		case 0x07:
    1.46 +			if (prefix & DATA32)
    1.47 +				regs->ves = pop32(regs);
    1.48 +			else
    1.49 +				regs->ves = pop16(regs);
    1.50 +			TRACE((regs, regs->eip - eip, "pop %%es"));
    1.51 +			return OPC_EMULATED;
    1.52 +
    1.53  		case 0x0F: /* two byte opcode */
    1.54  			if (mode == VM86_PROTECTED)
    1.55  				goto invalid;
    1.56 @@ -1288,6 +1318,22 @@ opcode(struct regs *regs)
    1.57                          return OPC_EMULATED;
    1.58  
    1.59  		case 0x89: /* addr32 mov r16, r/m16 */
    1.60 +			if (mode == VM86_PROTECTED_TO_REAL) {
    1.61 +				unsigned modrm = fetch8(regs);
    1.62 +				unsigned addr = operand(prefix, regs, modrm);
    1.63 +				unsigned val, r = (modrm >> 3) & 7;
    1.64 +				
    1.65 +				if (prefix & DATA32) {
    1.66 +					val = getreg32(regs, r);
    1.67 +					write32(addr, val);
    1.68 +				} else {
    1.69 +					val = getreg16(regs, r);
    1.70 +					write16(addr, MASK16(val));
    1.71 +				}
    1.72 +				TRACE((regs, regs->eip - eip,
    1.73 +					"mov %%%s, *0x%x", rnames[r], addr));
    1.74 +				return OPC_EMULATED;
    1.75 +			}
    1.76  		case 0x8B: /* addr32 mov r/m16, r16 */
    1.77  			if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
    1.78  				goto invalid;
    1.79 @@ -1326,6 +1372,37 @@ opcode(struct regs *regs)
    1.80  			regs->eflags |= EFLAGS_VM;
    1.81  			return OPC_EMULATED;
    1.82  
    1.83 +		case 0xA1: /* mov ax, r/m16 */ 
    1.84 +			{
    1.85 +				int addr, data;
    1.86 +				int seg = segment(prefix, regs, regs->vds);
    1.87 +				if (prefix & DATA32) {
    1.88 +					addr = address(regs, seg, fetch32(regs));
    1.89 +					data = read32(addr);
    1.90 +					setreg32(regs, 0, data);
    1.91 +				} else {
    1.92 +					addr = address(regs, seg, fetch16(regs));
    1.93 +					data = read16(addr);
    1.94 +					setreg16(regs, 0, data);
    1.95 +				}
    1.96 +				TRACE((regs, regs->eip - eip, "mov *0x%x, %%ax", addr));
    1.97 +			}
    1.98 +			return OPC_EMULATED;
    1.99 +
   1.100 +		case 0xBB: /* mov bx, imm16 */
   1.101 +			{
   1.102 +				int data;
   1.103 +				if (prefix & DATA32) {
   1.104 +					data = fetch32(regs);
   1.105 +					setreg32(regs, 3, data);
   1.106 +				} else {
   1.107 +					data = fetch16(regs);
   1.108 +					setreg16(regs, 3, data);
   1.109 +				}
   1.110 +				TRACE((regs, regs->eip - eip, "mov $0x%x, %%bx", data));
   1.111 +			}
   1.112 +			return OPC_EMULATED;
   1.113 +
   1.114  		case 0xC6: /* addr32 movb $imm, r/m8 */
   1.115                          if ((prefix & ADDR32) == 0)
   1.116                                  goto invalid;
   1.117 @@ -1380,21 +1457,25 @@ opcode(struct regs *regs)
   1.118  			goto invalid;
   1.119  
   1.120  		case 0xFF: /* jmpl (indirect) */
   1.121 -			if ((mode == VM86_REAL_TO_PROTECTED) ||
   1.122 -			    (mode == VM86_PROTECTED_TO_REAL)) {
   1.123 -			 	unsigned modrm = fetch8(regs);
   1.124 -				
   1.125 +			{
   1.126 +				unsigned modrm = fetch8(regs);
   1.127  				switch((modrm >> 3) & 7) {
   1.128 -				case 5:
   1.129 -				  jmpl_indirect(regs, prefix, modrm);
   1.130 -				  return OPC_INVALID;
   1.131 +				case 5: /* jmpl (indirect) */
   1.132 +					if ((mode == VM86_REAL_TO_PROTECTED) ||
   1.133 +					    (mode == VM86_PROTECTED_TO_REAL)) {
   1.134 +						jmpl_indirect(regs, prefix, modrm);
   1.135 +						return OPC_INVALID;
   1.136 +					}
   1.137 +					goto invalid;
   1.138 +
   1.139 +				case 6: /* push r/m16 */
   1.140 +					pushrm(regs, prefix, modrm);
   1.141 +					return OPC_EMULATED;
   1.142  
   1.143  				default:
   1.144 -				  break;
   1.145 +					goto invalid;
   1.146  				}
   1.147 -
   1.148  			}
   1.149 -			goto invalid;
   1.150  
   1.151  		case 0xEB: /* short jump */
   1.152  			if ((mode == VM86_REAL_TO_PROTECTED) ||