ia64/xen-unstable

changeset 4602:e02fc4c21740

bitkeeper revision 1.1345 (4266bd05lHlHunb0CEvOq60j2DvKCQ)

[PATCH] VMX world switch

The attached code implements a VMX world switch to vmxassist (a small assist
module residing in a VMX-enabled partition, where it is responsible for
emulating real mode) whenever CR0.PE is disabled.
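
For orientation, the core of the change is a small dispatch added to the CR0
write handler (mov_to_cr) in xen/arch/x86/vmx.c. The sketch below condenses
that logic from the hunks further down; debug logging and the surrounding
paging code are elided, so treat it as illustrative rather than the exact code:

    /* Condensed from the mov_to_cr() hunk in this patch (simplified). */
    if ((value & X86_CR0_PE) == 0) {
        /* Guest is dropping out of protected mode: hand control to vmxassist. */
        if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
            set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
            return 0; /* %eip now points into vmxassist; do not advance it */
        }
    } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                        &d->arch.arch_vmx.cpu_state)) {
        /* Guest re-enabled protected mode: restore the saved world. */
        if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
            clear_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
            return 0; /* resume at the context saved by VMX_ASSIST_INVOKE */
        }
    }

vmx_assist() communicates with the in-guest vmxassist module through a fixed
memory area at VMXASSIST_BASE (0xE0000): a magic number at
VMXASSIST_MAGIC_OFFSET identifies the module, and VMXASSIST_NEW_CONTEXT /
VMXASSIST_OLD_CONTEXT hold pointers to vmx_assist_context structures used to
load and save the full register state (see the new
xen/include/public/vmx_assist.h below).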

The patch temporarily clears the PGE feature flag in the cpuid results, as PGE
support is currently broken under VMX (try running an unmodified 2.6 kernel,
which sets PGE in mm/init.c:paging_init()).
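
The corresponding change is a one-line addition to the cpuid intercept in
vmx_vmexit_do_cpuid(); roughly the following (comments added here for context,
not part of the patch):

    /* Hide features the shadow page-table code cannot yet handle,
     * so the guest never attempts to enable them. */
    cpuid(input, &eax, &ebx, &ecx, &edx);
    if (input == 1) {
        clear_bit(X86_FEATURE_PGE,   &edx); /* temporarily disabled by this patch */
        clear_bit(X86_FEATURE_PSE,   &edx);
        clear_bit(X86_FEATURE_PAE,   &edx);
        clear_bit(X86_FEATURE_PSE36, &edx);
    }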

The patch adds consistency checks before setting the ARCH_VMX_IO_WAIT state
to detect race conditions on SMP systems.
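
Concretely, send_mmio_req() in xen/arch/x86/vmx_platform.c now refuses to issue
a new I/O request while a previous one is still marked outstanding; hitting this
path indicates the race rather than silently overwriting the pending request:

    /* From the send_mmio_req() hunk below (simplified). */
    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous(); /* unrecoverable: take the domain down */
    }
    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);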

Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
Signed-off-by: ian@xensource.com
author leendert@watson.ibm.com[iap10]
date Wed Apr 20 20:35:17 2005 +0000 (2005-04-20)
parents ebe62f849001
children 717d7dbd06ea
files .rootkeys xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/include/asm-x86/vmx_vmcs.h xen/include/public/vmx_assist.h
line diff
     1.1 --- a/.rootkeys	Wed Apr 20 18:39:42 2005 +0000
     1.2 +++ b/.rootkeys	Wed Apr 20 20:35:17 2005 +0000
     1.3 @@ -1430,6 +1430,7 @@ 41ee5e8c6mLxIx82KPsbpt_uts_vSA xen/inclu
     1.4  4051db79512nOCGweabrFWO2M2h5ng xen/include/public/physdev.h
     1.5  40589968wmhPmV5-ENbBYmMjnedgKw xen/include/public/sched_ctl.h
     1.6  404f3d2eR2Owk-ZcGOx9ULGHg3nrww xen/include/public/trace.h
     1.7 +4266bd01Ul-pC01ZVvBkhBnv5eqzvw xen/include/public/vmx_assist.h
     1.8  3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/public/xen.h
     1.9  3e397e66m2tO3s-J8Jnr7Ws_tGoPTg xen/include/xen/ac_timer.h
    1.10  40715b2epYl2jBbxzz9CI2rgIca7Zg xen/include/xen/acpi.h
     2.1 --- a/xen/arch/x86/vmx.c	Wed Apr 20 18:39:42 2005 +0000
     2.2 +++ b/xen/arch/x86/vmx.c	Wed Apr 20 20:35:17 2005 +0000
     2.3 @@ -195,6 +195,7 @@ static void vmx_vmexit_do_cpuid(unsigned
     2.4      cpuid(input, &eax, &ebx, &ecx, &edx);
     2.5  
     2.6      if (input == 1) {
     2.7 +        clear_bit(X86_FEATURE_PGE, &edx); /* temporarily disabled */
     2.8          clear_bit(X86_FEATURE_PSE, &edx);
     2.9          clear_bit(X86_FEATURE_PAE, &edx);
    2.10          clear_bit(X86_FEATURE_PSE36, &edx);
    2.11 @@ -382,10 +383,261 @@ static void vmx_io_instruction(struct xe
    2.12      do_block();
    2.13  }
    2.14  
    2.15 -static int
    2.16 -vm86assist(struct exec_domain *d)
    2.17 +enum { COPY_IN = 0, COPY_OUT };
    2.18 +
    2.19 +static inline int
    2.20 +vmx_copy(void *buf, unsigned long laddr, int size, int dir)
    2.21 +{
    2.22 +    unsigned char *addr;
    2.23 +    unsigned long mfn;
    2.24 +
    2.25 +    if ((size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE) {
    2.26 +    	printf("vmx_copy exceeds page boundary\n");
    2.27 +	return 0;
    2.28 +    }
    2.29 +
    2.30 +    mfn = phys_to_machine_mapping(gva_to_gpte(laddr) >> PAGE_SHIFT);
    2.31 +    addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
    2.32 +
    2.33 +    if (dir == COPY_IN)
    2.34 +	    memcpy(buf, addr, size);
    2.35 +    else
    2.36 +	    memcpy(addr, buf, size);
    2.37 +
    2.38 +    unmap_domain_mem(addr);
    2.39 +    return 1;
    2.40 +}
    2.41 +
    2.42 +int
    2.43 +vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
    2.44 +{
    2.45 +    unsigned long inst_len;
    2.46 +    int error = 0;
    2.47 +
    2.48 +    error |= __vmread(INSTRUCTION_LEN, &inst_len);
    2.49 +    error |= __vmread(GUEST_EIP, &c->eip);
    2.50 +    c->eip += inst_len; /* skip transition instruction */
    2.51 +    error |= __vmread(GUEST_ESP, &c->esp);
    2.52 +    error |= __vmread(GUEST_EFLAGS, &c->eflags);
    2.53 +
    2.54 +    error |= __vmread(CR0_READ_SHADOW, &c->cr0);
    2.55 +    c->cr3 = d->arch.arch_vmx.cpu_cr3;
    2.56 +    error |= __vmread(CR4_READ_SHADOW, &c->cr4);
    2.57 +
    2.58 +    error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
    2.59 +    error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
    2.60 +
    2.61 +    error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
    2.62 +    error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
    2.63 +
    2.64 +    error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
    2.65 +    error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
    2.66 +    error |= __vmread(GUEST_CS_BASE, &c->cs_base);
    2.67 +    error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
    2.68 +
    2.69 +    error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
    2.70 +    error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
    2.71 +    error |= __vmread(GUEST_DS_BASE, &c->ds_base);
    2.72 +    error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
    2.73 +
    2.74 +    error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
    2.75 +    error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
    2.76 +    error |= __vmread(GUEST_ES_BASE, &c->es_base);
    2.77 +    error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
    2.78 +
    2.79 +    error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
    2.80 +    error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
    2.81 +    error |= __vmread(GUEST_SS_BASE, &c->ss_base);
    2.82 +    error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
    2.83 +
    2.84 +    error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
    2.85 +    error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
    2.86 +    error |= __vmread(GUEST_FS_BASE, &c->fs_base);
    2.87 +    error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
    2.88 +
    2.89 +    error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
    2.90 +    error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
    2.91 +    error |= __vmread(GUEST_GS_BASE, &c->gs_base);
    2.92 +    error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
    2.93 +
    2.94 +    error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
    2.95 +    error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
    2.96 +    error |= __vmread(GUEST_TR_BASE, &c->tr_base);
    2.97 +    error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
    2.98 +
    2.99 +    error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
   2.100 +    error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
   2.101 +    error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
   2.102 +    error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
   2.103 +
   2.104 +    return !error;
   2.105 +}
   2.106 +
   2.107 +int
   2.108 +vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
   2.109  {
   2.110 -    /* stay tuned ... */
   2.111 +    unsigned long mfn, old_cr4;
   2.112 +    int error = 0;
   2.113 +
   2.114 +    error |= __vmwrite(GUEST_EIP, c->eip);
   2.115 +    error |= __vmwrite(GUEST_ESP, c->esp);
   2.116 +    error |= __vmwrite(GUEST_EFLAGS, c->eflags);
   2.117 +
   2.118 +    error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
   2.119 +
   2.120 +    if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
   2.121 +	/* 
   2.122 +	 * This is simple TLB flush, implying the guest has 
   2.123 +	 * removed some translation or changed page attributes.
   2.124 +	 * We simply invalidate the shadow.
   2.125 +	 */
   2.126 +	mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
   2.127 +	if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table)) {
   2.128 +	    VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
   2.129 +	    domain_crash_synchronous();
   2.130 +	    return 0;
   2.131 +	}
   2.132 +	shadow_sync_all(d->domain);
   2.133 +    } else {
   2.134 +	/*
   2.135 +	 * If different, make a shadow. Check if the PDBR is valid
   2.136 +	 * first.
   2.137 +	 */
   2.138 +	VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %lx", c->cr3);
   2.139 +	if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
   2.140 +	    VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
   2.141 +	    domain_crash_synchronous(); 
   2.142 +	    return 0;
   2.143 +	}
   2.144 +	mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
   2.145 +	d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   2.146 +	update_pagetables(d);
   2.147 +	/* 
   2.148 +	 * arch.shadow_table should now hold the next CR3 for shadow
   2.149 +	 */
   2.150 +	d->arch.arch_vmx.cpu_cr3 = c->cr3;
   2.151 +	VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", c->cr3);
   2.152 +	__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
   2.153 +    }
   2.154 +
   2.155 +    error |= __vmread(CR4_READ_SHADOW, &old_cr4);
   2.156 +    error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
   2.157 +    error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
   2.158 +
   2.159 +    error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   2.160 +    error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   2.161 +
   2.162 +    error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
   2.163 +    error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
   2.164 +
   2.165 +    error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
   2.166 +    error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
   2.167 +    error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
   2.168 +    error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
   2.169 +
   2.170 +    error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
   2.171 +    error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
   2.172 +    error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
   2.173 +    error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
   2.174 +
   2.175 +    error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
   2.176 +    error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
   2.177 +    error |= __vmwrite(GUEST_ES_BASE, c->es_base);
   2.178 +    error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
   2.179 +
   2.180 +    error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
   2.181 +    error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
   2.182 +    error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
   2.183 +    error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
   2.184 +
   2.185 +    error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
   2.186 +    error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
   2.187 +    error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
   2.188 +    error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
   2.189 +
   2.190 +    error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
   2.191 +    error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
   2.192 +    error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
   2.193 +    error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
   2.194 +
   2.195 +    error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
   2.196 +    error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
   2.197 +    error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
   2.198 +    error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
   2.199 +
   2.200 +    error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
   2.201 +    error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
   2.202 +    error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
   2.203 +    error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
   2.204 +
   2.205 +    return !error;
   2.206 +}
   2.207 +
   2.208 +enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
   2.209 +
   2.210 +int
   2.211 +vmx_assist(struct exec_domain *d, int mode)
   2.212 +{
   2.213 +    struct vmx_assist_context c;
   2.214 +    unsigned long magic, cp;
   2.215 +
   2.216 +    /* make sure vmxassist exists (this is not an error) */
   2.217 +    if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), COPY_IN))
   2.218 +    	return 0;
   2.219 +    if (magic != VMXASSIST_MAGIC)
   2.220 +    	return 0;
   2.221 +
   2.222 +    switch (mode) {
   2.223 +    /*
   2.224 +     * Transfer control to vmxassist.
   2.225 +     * Store the current context in VMXASSIST_OLD_CONTEXT and load
   2.226 +     * the new VMXASSIST_NEW_CONTEXT context. This context was created
   2.227 +     * by vmxassist and will transfer control to it.
   2.228 +     */
   2.229 +    case VMX_ASSIST_INVOKE:
   2.230 +	/* save the old context */
   2.231 +	if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
   2.232 +    	    goto error;
   2.233 +	if (cp != 0) {
   2.234 +    	    if (!vmx_world_save(d, &c))
   2.235 +		goto error;
   2.236 +	    if (!vmx_copy(&c, cp, sizeof(c), COPY_OUT))
   2.237 +		goto error;
   2.238 +	}
   2.239 +
   2.240 +	/* restore the new context, this should activate vmxassist */
   2.241 +	if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), COPY_IN))
   2.242 +	    goto error;
   2.243 +	if (cp != 0) {
   2.244 +            if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
   2.245 +		goto error;
   2.246 +    	    if (!vmx_world_restore(d, &c))
   2.247 +		goto error;
   2.248 +    	    return 1;
   2.249 +	}
   2.250 +	break;
   2.251 +
   2.252 +    /*
   2.253 +     * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
   2.254 +     * above.
   2.255 +     */
   2.256 +    case VMX_ASSIST_RESTORE:
   2.257 +	/* save the old context */
   2.258 +	if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
   2.259 +    	    goto error;
   2.260 +	if (cp != 0) {
   2.261 +            if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
   2.262 +		goto error;
   2.263 +    	    if (!vmx_world_restore(d, &c))
   2.264 +		goto error;
   2.265 +	    return 1;
   2.266 +	}
   2.267 +	break;
   2.268 +    }
   2.269 +
   2.270 +error:
   2.271 +    printf("Failed to transfer to vmxassist\n");
   2.272 +    domain_crash_synchronous(); 
   2.273      return 0;
   2.274  }
   2.275  
   2.276 @@ -399,6 +651,7 @@ static int mov_to_cr(int gp, int cr, str
   2.277  {
   2.278      unsigned long value;
   2.279      unsigned long old_cr;
   2.280 +    unsigned long eip;
   2.281      struct exec_domain *d = current;
   2.282  
   2.283      switch (gp) {
   2.284 @@ -469,15 +722,28 @@ static int mov_to_cr(int gp, int cr, str
   2.285              put_page_and_type(&frame_table[old_base_mfn]);
   2.286          } else {
   2.287              if ((value & X86_CR0_PE) == 0) {
   2.288 -		unsigned long eip;
   2.289 -
   2.290  	        __vmread(GUEST_EIP, &eip);
   2.291                  VMX_DBG_LOG(DBG_LEVEL_1,
   2.292  			"Disabling CR0.PE at %%eip 0x%lx", eip);
   2.293 -		if (vm86assist(d)) {
   2.294 +		if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
   2.295 +		    set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
   2.296 +						&d->arch.arch_vmx.cpu_state);
   2.297  	            __vmread(GUEST_EIP, &eip);
   2.298  		    VMX_DBG_LOG(DBG_LEVEL_1,
   2.299 -			"Transfering control to vm86assist %%eip 0x%lx", eip);
   2.300 +			"Transfering control to vmxassist %%eip 0x%lx", eip);
   2.301 +		    return 0; /* do not update eip! */
   2.302 +		}
   2.303 +	    } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
   2.304 +					&d->arch.arch_vmx.cpu_state)) {
   2.305 +		__vmread(GUEST_EIP, &eip);
   2.306 +		VMX_DBG_LOG(DBG_LEVEL_1,
   2.307 +			"Enabling CR0.PE at %%eip 0x%lx", eip);
   2.308 +		if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
   2.309 +		    clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
   2.310 +						&d->arch.arch_vmx.cpu_state);
   2.311 +		    __vmread(GUEST_EIP, &eip);
   2.312 +		    VMX_DBG_LOG(DBG_LEVEL_1,
   2.313 +			"Restoring to %%eip 0x%lx", eip);
   2.314  		    return 0; /* do not update eip! */
   2.315  		}
   2.316  	    }
   2.317 @@ -549,6 +815,7 @@ static int mov_to_cr(int gp, int cr, str
   2.318           */
   2.319          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
   2.320              vmx_shadow_clear_state(d->domain);
   2.321 +            shadow_sync_all(d->domain);
   2.322          }
   2.323          break;
   2.324      default:
     3.1 --- a/xen/arch/x86/vmx_platform.c	Wed Apr 20 18:39:42 2005 +0000
     3.2 +++ b/xen/arch/x86/vmx_platform.c	Wed Apr 20 20:35:17 2005 +0000
     3.3 @@ -484,6 +484,11 @@ static void send_mmio_req(unsigned long 
     3.4  
     3.5      vm86 = inst_decoder_regs->eflags & X86_EFLAGS_VM;
     3.6  
     3.7 +    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
     3.8 +        printf("VMX I/O has not yet completed\n");
     3.9 +        domain_crash_synchronous();
    3.10 +    }
    3.11 +
    3.12      set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
    3.13      p->dir = dir;
    3.14      p->pdata_valid = pvalid;
     4.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Wed Apr 20 18:39:42 2005 +0000
     4.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Wed Apr 20 20:35:17 2005 +0000
     4.3 @@ -22,24 +22,15 @@
     4.4  #include <asm/config.h>
     4.5  #include <asm/vmx_cpu.h>
     4.6  #include <asm/vmx_platform.h>
     4.7 +#include <public/vmx_assist.h>
     4.8  
     4.9  extern int start_vmx(void);
    4.10  extern void stop_vmx(void);
    4.11  
    4.12  void vmx_enter_scheduler(void);
    4.13  
    4.14 -union vmcs_arbytes {
    4.15 -    struct arbyte_fields {
    4.16 -        unsigned int 
    4.17 -        seg_type: 4, s: 1, dpl: 2, p: 1, 
    4.18 -        reserved0: 4, avl: 1, reserved1: 1,     
    4.19 -        default_ops_size: 1, g: 1, null_bit: 1, 
    4.20 -        reserved2: 15;
    4.21 -    }  __attribute__((packed)) fields;
    4.22 -    unsigned int bytes;
    4.23 -};
    4.24 -
    4.25  #define VMX_CPU_STATE_PG_ENABLED        0       
    4.26 +#define	VMX_CPU_STATE_ASSIST_ENABLED	1
    4.27  #define VMCS_SIZE                       0x1000
    4.28  
    4.29  struct vmcs_struct {
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/include/public/vmx_assist.h	Wed Apr 20 20:35:17 2005 +0000
     5.3 @@ -0,0 +1,101 @@
     5.4 +/*
     5.5 + * vmx_assist.h: Context definitions for the VMXASSIST world switch.
     5.6 + *
     5.7 + * Leendert van Doorn, leendert@watson.ibm.com
     5.8 + * Copyright (c) 2005, International Business Machines Corporation.
     5.9 + *
    5.10 + * This program is free software; you can redistribute it and/or modify it
    5.11 + * under the terms and conditions of the GNU General Public License,
    5.12 + * version 2, as published by the Free Software Foundation.
    5.13 + *
    5.14 + * This program is distributed in the hope it will be useful, but WITHOUT
    5.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    5.17 + * more details.
    5.18 + *
    5.19 + * You should have received a copy of the GNU General Public License along with
    5.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    5.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    5.22 + *
    5.23 + */
    5.24 +#ifndef _VMX_ASSIST_H_
    5.25 +#define _VMX_ASSIST_H_
    5.26 +
    5.27 +#define	VMXASSIST_BASE		0xE0000
    5.28 +#define	VMXASSIST_MAGIC		0x17101966
    5.29 +#define	VMXASSIST_MAGIC_OFFSET	(VMXASSIST_BASE+8)
    5.30 +
    5.31 +#define	VMXASSIST_NEW_CONTEXT	(VMXASSIST_BASE + 12)
    5.32 +#define	VMXASSIST_OLD_CONTEXT	(VMXASSIST_NEW_CONTEXT + 4)
    5.33 +
    5.34 +#ifndef __ASSEMBLY__
    5.35 +
    5.36 +union vmcs_arbytes {
    5.37 +	struct arbyte_fields {
    5.38 +		unsigned int	seg_type	: 4,
    5.39 +				s		: 1,
    5.40 +				dpl		: 2,
    5.41 +				p		: 1, 
    5.42 +			 	reserved0	: 4,
    5.43 +				avl		: 1,
    5.44 +				reserved1	: 1,     
    5.45 +				default_ops_size: 1,
    5.46 +				g		: 1,
    5.47 +				null_bit	: 1, 
    5.48 +				reserved2	: 15;
    5.49 +	}  __attribute__((packed)) fields;
    5.50 +	unsigned int bytes;
    5.51 +};
    5.52 +
    5.53 +/*
    5.54 + * World switch state
    5.55 + */
    5.56 +typedef struct vmx_assist_context {
    5.57 +	unsigned long		eip;		/* execution pointer */
    5.58 +	unsigned long		esp;		/* stack point */
    5.59 +	unsigned long		eflags;		/* flags register */
    5.60 +	unsigned long		cr0;
    5.61 +	unsigned long		cr3;		/* page table directory */
    5.62 +	unsigned long		cr4;
    5.63 +	unsigned long		idtr_limit;	/* idt */
    5.64 +	unsigned long		idtr_base;
    5.65 +	unsigned long		gdtr_limit;	/* gdt */
    5.66 +	unsigned long		gdtr_base;
    5.67 +	unsigned long		cs_sel;		/* cs selector */
    5.68 +	unsigned long		cs_limit;
    5.69 +	unsigned long		cs_base;
    5.70 +	union vmcs_arbytes	cs_arbytes;
    5.71 +	unsigned long		ds_sel;		/* ds selector */
    5.72 +	unsigned long		ds_limit;
    5.73 +	unsigned long		ds_base;
    5.74 +	union vmcs_arbytes	ds_arbytes;
    5.75 +	unsigned long		es_sel;		/* es selector */
    5.76 +	unsigned long		es_limit;
    5.77 +	unsigned long		es_base;
    5.78 +	union vmcs_arbytes	es_arbytes;
    5.79 +	unsigned long		ss_sel;		/* ss selector */
    5.80 +	unsigned long		ss_limit;
    5.81 +	unsigned long		ss_base;
    5.82 +	union vmcs_arbytes	ss_arbytes;
    5.83 +	unsigned long		fs_sel;		/* fs selector */
    5.84 +	unsigned long		fs_limit;
    5.85 +	unsigned long		fs_base;
    5.86 +	union vmcs_arbytes	fs_arbytes;
    5.87 +	unsigned long		gs_sel;		/* gs selector */
    5.88 +	unsigned long		gs_limit;
    5.89 +	unsigned long		gs_base;
    5.90 +	union vmcs_arbytes	gs_arbytes;
    5.91 +	unsigned long		tr_sel;		/* task selector */
    5.92 +	unsigned long		tr_limit;
    5.93 +	unsigned long		tr_base;
    5.94 +	union vmcs_arbytes	tr_arbytes;
    5.95 +	unsigned long		ldtr_sel;	/* ldtr selector */
    5.96 +	unsigned long		ldtr_limit;
    5.97 +	unsigned long		ldtr_base;
    5.98 +	union vmcs_arbytes	ldtr_arbytes;
    5.99 +} vmx_assist_context_t;
   5.100 +
   5.101 +#endif /* __ASSEMBLY__ */
   5.102 +
   5.103 +#endif /* _VMX_ASSIST_H_ */
   5.104 +