ia64/xen-unstable
changeset 7618:4321438e92a7
Merged.
author   | emellor@leeni.uk.xensource.com
date     | Wed Nov 02 16:43:32 2005 +0100 (2005-11-02)
parents  | 7f8db234e9db e519f3239a97
children | 9cdfcecf4968 fe487b19c379
files    |
line diff
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Wed Nov 02 16:43:32 2005 +0100
@@ -520,7 +520,7 @@ void __init print_cpu_info(struct cpuinf
         printk("\n");
 }
 
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -562,7 +562,7 @@ void __init early_cpu_init(void)
 #endif
 }
 
-void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
 {
         unsigned long frames[16];
         unsigned long va;
@@ -585,7 +585,7 @@ void __init cpu_gdt_init(struct Xgt_desc
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __init cpu_init (void)
+void __cpuinit cpu_init (void)
 {
         int cpu = smp_processor_id();
         struct tss_struct * t = &per_cpu(init_tss, cpu);

--- a/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Wed Nov 02 16:43:32 2005 +0100
@@ -191,10 +191,17 @@ void __init smp_prepare_cpus(unsigned in
         int cpu, rc;
         struct task_struct *idle;
 
-        if (max_cpus == 0)
-                return;
+        cpu_data[0] = boot_cpu_data;
+
+        cpu_2_logical_apicid[0] = 0;
+        x86_cpu_to_apicid[0] = 0;
 
-        xen_smp_intr_init(0);
+        current_thread_info()->cpu = 0;
+        cpu_sibling_map[0] = cpumask_of_cpu(0);
+        cpu_core_map[0] = cpumask_of_cpu(0);
+
+        if (max_cpus != 0)
+                xen_smp_intr_init(0);
 
         for (cpu = 1; cpu < max_cpus; cpu++) {
                 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
@@ -229,16 +236,20 @@ void __init smp_prepare_cpus(unsigned in
                 make_page_readonly((void *)cpu_gdt_descr[cpu].address);
 
                 cpu_set(cpu, cpu_possible_map);
+#ifdef CONFIG_HOTPLUG_CPU
                 if (xen_start_info->flags & SIF_INITDOMAIN)
                         cpu_set(cpu, cpu_present_map);
+#else
+                cpu_set(cpu, cpu_present_map);
+#endif
 
                 vcpu_prepare(cpu);
         }
 
         /* Currently, Xen gives no dynamic NUMA/HT info. */
-        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
-                cpus_clear(cpu_core_map[cpu]);
+        for (cpu = 1; cpu < NR_CPUS; cpu++) {
+                cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
+                cpu_core_map[cpu] = cpumask_of_cpu(cpu);
         }
 
 #ifdef CONFIG_X86_IO_APIC
@@ -256,18 +267,9 @@ void __devinit smp_prepare_boot_cpu(void
         cpu_possible_map = cpumask_of_cpu(0);
         cpu_present_map = cpumask_of_cpu(0);
         cpu_online_map = cpumask_of_cpu(0);
-
-        cpu_data[0] = boot_cpu_data;
-        cpu_2_logical_apicid[0] = 0;
-        x86_cpu_to_apicid[0] = 0;
+}
 
-        current_thread_info()->cpu = 0;
-        cpus_clear(cpu_sibling_map[0]);
-        cpu_set(0, cpu_sibling_map[0]);
-
-        cpus_clear(cpu_core_map[0]);
-        cpu_set(0, cpu_core_map[0]);
-}
+#ifdef CONFIG_HOTPLUG_CPU
 
 static void vcpu_hotplug(unsigned int cpu)
 {
@@ -288,11 +290,7 @@ static void vcpu_hotplug(unsigned int cp
                 cpu_set(cpu, cpu_present_map);
                 (void)cpu_up(cpu);
         } else if (strcmp(state, "offline") == 0) {
-#ifdef CONFIG_HOTPLUG_CPU
                 (void)cpu_down(cpu);
-#else
-                printk(KERN_INFO "Ignoring CPU%d hotplug request\n", cpu);
-#endif
         } else {
                 printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
                        state, cpu);
@@ -342,8 +340,6 @@ static int __init setup_vcpu_hotplug_eve
 
 subsys_initcall(setup_vcpu_hotplug_event);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 int __cpu_disable(void)
 {
         cpumask_t map = cpu_online_map;

--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c	Wed Nov 02 16:43:32 2005 +0100
@@ -35,7 +35,7 @@
 #endif
 char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
 
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
 
@@ -130,7 +130,7 @@ static void switch_pt(void)
         xen_new_user_pt(__pa(init_level4_user_pgt));
 }
 
-void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
 {
         unsigned long frames[16];
         unsigned long va;
@@ -227,7 +227,7 @@ void syscall_init(void)
 #endif
 }
 
-void __init check_efer(void)
+void __cpuinit check_efer(void)
 {
         unsigned long efer;
 
@@ -244,7 +244,7 @@ void __init check_efer(void)
  * 'CPU state barrier', nothing should get across.
  * A lot of state is already set up in PDA init.
  */
-void __init cpu_init (void)
+void __cpuinit cpu_init (void)
 {
 #ifdef CONFIG_SMP
         int cpu = stack_smp_processor_id();

--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Wed Nov 02 16:43:32 2005 +0100
@@ -458,7 +458,7 @@ static void watch_for_status(struct xenb
         node += strlen(watch->node);
 
         /* FIXME: clean up when error on the other end. */
-        if (info->connected == BLKIF_STATE_CONNECTED)
+        if ((info->connected == BLKIF_STATE_CONNECTED) || info->mi)
                 return;
 
         err = xenbus_gather(NULL, watch->node,

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/linux-2.6.12/cpu-hotplug-init.patch	Wed Nov 02 16:43:32 2005 +0100
@@ -0,0 +1,34 @@
+diff -ur linux-2.6.12.orig/include/linux/init.h linux-2.6.12/include/linux/init.h
+--- linux-2.6.12.orig/include/linux/init.h	2005-11-01 14:52:28.656025573 +0000
++++ linux-2.6.12/include/linux/init.h	2005-11-01 14:53:28.015791549 +0000
+@@ -229,6 +229,18 @@
+ #define __devexitdata __exitdata
+ #endif
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++#define __cpuinit
++#define __cpuinitdata
++#define __cpuexit
++#define __cpuexitdata
++#else
++#define __cpuinit __init
++#define __cpuinitdata __initdata
++#define __cpuexit __exit
++#define __cpuexitdata __exitdata
++#endif
++
+ /* Functions marked as __devexit may be discarded at kernel link time, depending
+    on config options. Newer versions of binutils detect references from
+    retained sections to discarded sections and flag an error. Pointers to
+diff -ur linux-2.6.12.orig/arch/x86_64/kernel/i387.c linux-2.6.12/arch/x86_64/kernel/i387.c
+--- linux-2.6.12.orig/arch/x86_64/kernel/i387.c	2005-11-01 15:01:58.932991232 +0000
++++ linux-2.6.12/arch/x86_64/kernel/i387.c	2005-11-01 15:02:09.729312416 +0000
+@@ -42,7 +42,7 @@
+  * Called at bootup to set up the initial FPU state that is later cloned
+  * into all processes.
+  */
+-void __init fpu_init(void)
++void __cpuinit fpu_init(void)
+ {
+         unsigned long oldcr0 = read_cr0();
+         extern void __bad_fxsave_alignment(void);

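The new cpu-hotplug-init.patch backports the __cpuinit family of annotations to the linux-2.6.12 sparse tree: with CONFIG_HOTPLUG_CPU the annotated code stays resident so it can run again when a CPU is brought online later, while without it the annotations fall back to __init/__initdata and the code is discarded after boot. A minimal sketch of how such an annotation is used in kernel code follows (the helper and variable names here are hypothetical, for illustration only):

    #include <linux/init.h>
    #include <linux/cpumask.h>

    /* Hypothetical example: data touched only while a CPU is being set up. */
    static cpumask_t example_cpus_seen __cpuinitdata = CPU_MASK_NONE;

    /* Runs once per CPU bring-up; kept in memory only if CPU hotplug is on. */
    void __cpuinit example_cpu_setup(unsigned int cpu)
    {
            cpu_set(cpu, example_cpus_seen);
    }

This mirrors the conversions above, where cpu_init(), cpu_gdt_init() and cpu_initialized move from __init/__initdata to the hotplug-aware variants.
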
--- a/tools/console/daemon/io.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/console/daemon/io.c	Wed Nov 02 16:43:32 2005 +0100
@@ -380,12 +380,21 @@ static void cleanup_domain(struct domain
         if (!buffer_empty(&d->buffer))
                 return;
 
-        if (d->buffer.data)
+        if (d->buffer.data) {
                 free(d->buffer.data);
-        d->buffer.data = NULL;
-        if (d->tty_fd != -1)
+                d->buffer.data = NULL;
+        }
+
+        if (d->tty_fd != -1) {
                 close(d->tty_fd);
-        d->tty_fd = -1;
+                d->tty_fd = -1;
+        }
+
+        if (d->conspath) {
+                free(d->conspath);
+                d->conspath = NULL;
+        }
+
         remove_domain(d);
 }
 

--- a/tools/console/daemon/main.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/console/daemon/main.c	Wed Nov 02 16:43:32 2005 +0100
@@ -30,10 +30,14 @@
 #include "utils.h"
 #include "io.h"
 
-void usage(char *prg)
+static void usage(char *name)
 {
-        fprintf(stderr,
-                "usage: %s [-h] [-V] [-v] [-i]\n", prg);
+        printf("Usage: %s [-h] [-V] [-v] [-i]\n", name);
+}
+
+static void version(char *name)
+{
+        printf("Xen Console Daemon 3.0\n");
 }
 
 int main(int argc, char **argv)
@@ -58,7 +62,7 @@ int main(int argc, char **argv)
                 usage(argv[0]);
                 exit(0);
         case 'V':
-                //version(argv[0]);
+                version(argv[0]);
                 exit(0);
         case 'v':
                 syslog_option |= LOG_PERROR;

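For context, the restored version() call sits in the daemon's option-parsing switch. A simplified sketch of such a loop is shown below; it is illustrative only and not the daemon's exact code (the real main() also handles -v and -i and configures syslog):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void usage(char *name)   { printf("Usage: %s [-h] [-V] [-v] [-i]\n", name); }
    static void version(char *name) { printf("Xen Console Daemon 3.0\n"); }

    /* Hypothetical stand-alone option loop mirroring the -h/-V handling above. */
    static void parse_options(int argc, char **argv)
    {
            int ch;
            while ((ch = getopt(argc, argv, "hVvi")) != -1) {
                    switch (ch) {
                    case 'h':
                            usage(argv[0]);
                            exit(0);
                    case 'V':
                            version(argv[0]);  /* previously commented out */
                            exit(0);
                    default:
                            break;
                    }
            }
    }
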
--- a/tools/vtpm_manager/manager/vtpm_manager.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/vtpm_manager/manager/vtpm_manager.c	Wed Nov 02 16:43:32 2005 +0100
@@ -140,12 +140,15 @@ TPM_RESULT VTPM_Create_Service(){
   TPM_AUTHDATA sharedsecret;
 
   TPMTRYRETURN( VTSP_OSAP(vtpm_globals->manager_tcs_handle,
-                          TPM_ET_SRK,
-                          0,
+                          TPM_ET_KEYHANDLE,
+                          TPM_SRK_KEYHANDLE,
                           (const TPM_AUTHDATA*)&vtpm_globals->srk_usage_auth,
                           &sharedsecret,
                           &osap) );
-
+
+  osap.fContinueAuthSession = FALSE;
+
+
   TPMTRYRETURN( VTSP_CreateWrapKey( vtpm_globals->manager_tcs_handle,
                                     TPM_KEY_BIND,
                                     (const TPM_AUTHDATA*)&vtpm_globals->storage_key_usage_auth,

--- a/tools/vtpm_manager/manager/vtsp.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/vtpm_manager/manager/vtsp.c	Wed Nov 02 16:43:32 2005 +0100
@@ -180,8 +180,8 @@ TPM_RESULT VTSP_OSAP(const TCS_CONTEXT_H
   Crypto_GetRandom((BYTE *) &nonceOddOSAP, sizeof(TPM_NONCE) );
 
   TPMTRYRETURN( TCSP_OSAP( hContext,
-                           TPM_ET_SRK,
-                           0,
+                           entityType,
+                           entityValue,
                            nonceOddOSAP,
                            &auth->AuthHandle,
                            &auth->NonceEven,

--- a/tools/vtpm_manager/util/buffer.h	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/vtpm_manager/util/buffer.h	Wed Nov 02 16:43:32 2005 +0100
@@ -37,18 +37,6 @@
 #include <stddef.h> // for pointer NULL
 #include "tcg.h"
 
-// structure to enable use of FMT_SIZE32_DATA in BSG_Unpack
-typedef struct pack_buf_t {
-  UINT32 size;
-  BYTE * data;
-} pack_buf_t;
-
-// and a const version for Pack
-typedef struct pack_constbuf_t {
-  UINT32 size;
-  const BYTE* data;
-} pack_constbuf_t;
-
 typedef UINT32 tpm_size_t;
 
 // first version, probably will be expanded...

--- a/tools/vtpm_manager/util/tcg.h	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/vtpm_manager/util/tcg.h	Wed Nov 02 16:43:32 2005 +0100
@@ -191,6 +191,20 @@ typedef struct TCS_AUTH {
   TPM_AUTHDATA HMAC;
 } TCS_AUTH;
 
+// structures for dealing with sizes followed by buffers in all the
+// TCG structure.
+typedef struct pack_buf_t {
+  UINT32 size;
+  BYTE * data;
+} pack_buf_t;
+
+typedef struct pack_constbuf_t {
+  UINT32 size;
+  const BYTE* data;
+} pack_constbuf_t;
+
+
+
 // **************************** CONSTANTS *********************************
 
 // BOOL values

--- a/tools/xenstore/xs.h	Wed Nov 02 16:42:29 2005 +0100
+++ b/tools/xenstore/xs.h	Wed Nov 02 16:43:32 2005 +0100
@@ -136,7 +136,7 @@ bool xs_introduce_domain(struct xs_handl
  */
 bool xs_release_domain(struct xs_handle *h, unsigned int domid);
 
-/* Query the home path of a domain.
+/* Query the home path of a domain. Call free() after use.
  */
 char *xs_get_domain_path(struct xs_handle *h, unsigned int domid);
 

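The added comment documents the ownership rule for xs_get_domain_path(): the returned path is heap-allocated by the xenstore library and the caller is responsible for freeing it. A minimal usage sketch under that assumption (hypothetical caller, assuming the header is included as <xs.h>, error handling elided):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xs.h>

    void print_domain_path(struct xs_handle *h, unsigned int domid)
    {
            /* xs_get_domain_path() returns a malloc'd string; free() it after use. */
            char *path = xs_get_domain_path(h, domid);
            if (path != NULL) {
                    printf("domain %u lives at %s\n", domid, path);
                    free(path);
            }
    }
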
--- a/xen/arch/x86/domain.c	Wed Nov 02 16:42:29 2005 +0100
+++ b/xen/arch/x86/domain.c	Wed Nov 02 16:43:32 2005 +0100
@@ -408,6 +408,9 @@ int arch_set_info_guest(
         if ( !pagetable_get_paddr(d->arch.phys_table) )
             d->arch.phys_table = v->arch.guest_table;
 
+        /* Initialize monitor page table */
+        v->arch.monitor_table = mk_pagetable(0);
+
         vmx_final_setup_guest(v);
     }
 

14.1 --- a/xen/arch/x86/shadow.c Wed Nov 02 16:42:29 2005 +0100 14.2 +++ b/xen/arch/x86/shadow.c Wed Nov 02 16:43:32 2005 +0100 14.3 @@ -1,19 +1,19 @@ 14.4 /****************************************************************************** 14.5 - * arch/x86/shadow_64.c 14.6 - * 14.7 + * arch/x86/shadow.c 14.8 + * 14.9 * Copyright (c) 2005 Michael A Fetterman 14.10 * Based on an earlier implementation by Ian Pratt et al 14.11 - * 14.12 + * 14.13 * This program is free software; you can redistribute it and/or modify 14.14 * it under the terms of the GNU General Public License as published by 14.15 * the Free Software Foundation; either version 2 of the License, or 14.16 * (at your option) any later version. 14.17 - * 14.18 + * 14.19 * This program is distributed in the hope that it will be useful, 14.20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14.21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14.22 * GNU General Public License for more details. 14.23 - * 14.24 + * 14.25 * You should have received a copy of the GNU General Public License 14.26 * along with this program; if not, write to the Free Software 14.27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 14.28 @@ -55,7 +55,6 @@ static void shadow_map_into_current(stru 14.29 unsigned long va, unsigned int from, unsigned int to); 14.30 static inline void validate_bl2e_change( struct domain *d, 14.31 guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index); 14.32 - 14.33 #endif 14.34 14.35 /******** 14.36 @@ -102,7 +101,6 @@ shadow_promote(struct domain *d, unsigne 14.37 return 1; 14.38 #endif 14.39 return 0; 14.40 - 14.41 } 14.42 14.43 // To convert this page to use as a page table, the writable count 14.44 @@ -490,12 +488,12 @@ static unsigned long shadow_l2_table( 14.45 * We could proactively fill in PDEs for pages that are already 14.46 * shadowed *and* where the guest PDE has _PAGE_ACCESSED set 14.47 * (restriction required for coherence of the accessed bit). However, 14.48 - * we tried it and it didn't help performance. This is simpler. 14.49 + * we tried it and it didn't help performance. This is simpler. 14.50 */ 14.51 memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t)); 14.52 14.53 /* Install hypervisor and 2x linear p.t. mapings. 
*/ 14.54 - memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.55 + memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.56 &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.57 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t)); 14.58 14.59 @@ -522,7 +520,7 @@ static unsigned long shadow_l2_table( 14.60 // 14.61 if ( !get_shadow_ref(hl2mfn) ) 14.62 BUG(); 14.63 - 14.64 + 14.65 spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = 14.66 l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR); 14.67 } 14.68 @@ -532,7 +530,7 @@ static unsigned long shadow_l2_table( 14.69 } 14.70 else 14.71 { 14.72 - memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t)); 14.73 + memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t)); 14.74 } 14.75 14.76 unmap_domain_page(spl2e); 14.77 @@ -543,7 +541,7 @@ static unsigned long shadow_l2_table( 14.78 #endif 14.79 14.80 static void shadow_map_l1_into_current_l2(unsigned long va) 14.81 -{ 14.82 +{ 14.83 struct vcpu *v = current; 14.84 struct domain *d = v->domain; 14.85 l1_pgentry_t *spl1e; 14.86 @@ -596,7 +594,7 @@ static void shadow_map_l1_into_current_l 14.87 #if CONFIG_PAGING_LEVELS >=4 14.88 if (d->arch.ops->guest_paging_levels == PAGING_L2) 14.89 { 14.90 - /* for 32-bit VMX guest on 64-bit host, 14.91 + /* for 32-bit VMX guest on 64-bit host, 14.92 * need update two L2 entries each time 14.93 */ 14.94 if ( !get_shadow_ref(sl1mfn)) 14.95 @@ -624,7 +622,7 @@ static void shadow_map_l1_into_current_l 14.96 l1_pgentry_t sl1e; 14.97 int index = guest_l1_table_offset(va); 14.98 int min = 1, max = 0; 14.99 - 14.100 + 14.101 unsigned long entries, pt_va; 14.102 l1_pgentry_t tmp_sl1e; 14.103 guest_l1_pgentry_t tmp_gl1e;//Prepare for double compile 14.104 @@ -790,7 +788,7 @@ shadow_alloc_oos_entry(struct domain *d) 14.105 14.106 /* Record the allocation block so it can be correctly freed later. */ 14.107 d->arch.out_of_sync_extras_count++; 14.108 - *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) = 14.109 + *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) = 14.110 d->arch.out_of_sync_extras; 14.111 d->arch.out_of_sync_extras = &extra[0]; 14.112 14.113 @@ -1020,7 +1018,7 @@ static int is_out_of_sync(struct vcpu *v 14.114 { 14.115 struct domain *d = v->domain; 14.116 #if defined (__x86_64__) 14.117 - unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)? 14.118 + unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)? 
14.119 pagetable_get_pfn(v->arch.guest_table) : 14.120 pagetable_get_pfn(v->arch.guest_table_user)); 14.121 #else 14.122 @@ -1082,7 +1080,7 @@ static int is_out_of_sync(struct vcpu *v 14.123 return 1; 14.124 14.125 __guest_get_l2e(v, va, &l2e); 14.126 - if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) || 14.127 + if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) || 14.128 (guest_l2e_get_flags(l2e) & _PAGE_PSE)) 14.129 return 0; 14.130 14.131 @@ -1155,7 +1153,7 @@ decrease_writable_pte_prediction(struct 14.132 } 14.133 14.134 static int fix_entry( 14.135 - struct domain *d, 14.136 + struct domain *d, 14.137 l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find) 14.138 { 14.139 l1_pgentry_t old = *pt; 14.140 @@ -1194,19 +1192,19 @@ static u32 remove_all_write_access_in_pt 14.141 match = l1e_from_pfn(readonly_gmfn, flags); 14.142 14.143 if ( shadow_mode_external(d) ) { 14.144 - i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 14.145 + i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 14.146 >> PGT_va_shift; 14.147 14.148 if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) && 14.149 - !l1e_has_changed(pt[i], match, flags) && 14.150 + !l1e_has_changed(pt[i], match, flags) && 14.151 fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) && 14.152 !prediction ) 14.153 goto out; 14.154 } 14.155 - 14.156 + 14.157 for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++) 14.158 { 14.159 - if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 14.160 + if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 14.161 fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) ) 14.162 break; 14.163 } 14.164 @@ -1255,7 +1253,7 @@ static int remove_all_write_access( 14.165 } 14.166 14.167 if ( shadow_mode_external(d) ) { 14.168 - if (write_refs-- == 0) 14.169 + if (write_refs-- == 0) 14.170 return 0; 14.171 14.172 // Use the back pointer to locate the shadow page that can contain 14.173 @@ -1275,7 +1273,7 @@ static int remove_all_write_access( 14.174 a = &d->arch.shadow_ht[i]; 14.175 while ( a && a->gpfn_and_flags ) 14.176 { 14.177 - if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow 14.178 + if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow 14.179 #if CONFIG_PAGING_LEVELS >= 4 14.180 || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow 14.181 #endif 14.182 @@ -1384,10 +1382,10 @@ static int resync_all(struct domain *d, 14.183 if ( (i < min_snapshot) || (i > max_snapshot) || 14.184 guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) ) 14.185 { 14.186 - int error; 14.187 + int error; 14.188 14.189 error = validate_pte_change(d, guest1[i], &shadow1[i]); 14.190 - if ( error == -1 ) 14.191 + if ( error == -1 ) 14.192 unshadow_l1 = 1; 14.193 else { 14.194 need_flush |= error; 14.195 @@ -1474,7 +1472,7 @@ static int resync_all(struct domain *d, 14.196 l2_pgentry_t *guest2 = guest; 14.197 l2_pgentry_t *snapshot2 = snapshot; 14.198 l1_pgentry_t *shadow2 = shadow; 14.199 - 14.200 + 14.201 ASSERT(shadow_mode_write_all(d)); 14.202 BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented 14.203 14.204 @@ -1634,7 +1632,7 @@ static void sync_all(struct domain *d) 14.205 !shadow_get_page_from_l1e(npte, d) ) 14.206 BUG(); 14.207 *ppte = npte; 14.208 - set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 14.209 + set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 14.210 (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t)); 14.211 shadow_put_page_from_l1e(opte, d); 14.212 14.213 @@ -1719,7 +1717,7 @@ static inline int 
l1pte_write_fault( 14.214 14.215 static inline int l1pte_read_fault( 14.216 struct domain *d, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p) 14.217 -{ 14.218 +{ 14.219 guest_l1_pgentry_t gpte = *gpte_p; 14.220 l1_pgentry_t spte = *spte_p; 14.221 unsigned long pfn = l1e_get_pfn(gpte); 14.222 @@ -1761,7 +1759,7 @@ static int shadow_fault_32(unsigned long 14.223 SH_VVLOG("shadow_fault( va=%lx, code=%lu )", 14.224 va, (unsigned long)regs->error_code); 14.225 perfc_incrc(shadow_fault_calls); 14.226 - 14.227 + 14.228 check_pagetable(v, "pre-sf"); 14.229 14.230 /* 14.231 @@ -1804,7 +1802,7 @@ static int shadow_fault_32(unsigned long 14.232 } 14.233 14.234 /* Write fault? */ 14.235 - if ( regs->error_code & 2 ) 14.236 + if ( regs->error_code & 2 ) 14.237 { 14.238 int allow_writes = 0; 14.239 14.240 @@ -1818,7 +1816,7 @@ static int shadow_fault_32(unsigned long 14.241 else 14.242 { 14.243 /* Write fault on a read-only mapping. */ 14.244 - SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 14.245 + SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 14.246 l1e_get_intpte(gpte)); 14.247 perfc_incrc(shadow_fault_bail_ro_mapping); 14.248 goto fail; 14.249 @@ -1878,7 +1876,7 @@ static int shadow_fault_32(unsigned long 14.250 check_pagetable(v, "post-sf"); 14.251 return EXCRET_fault_fixed; 14.252 14.253 - fail: 14.254 +fail: 14.255 shadow_unlock(d); 14.256 return 0; 14.257 } 14.258 @@ -1895,7 +1893,7 @@ static int do_update_va_mapping(unsigned 14.259 shadow_lock(d); 14.260 14.261 //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val)); 14.262 - 14.263 + 14.264 // This is actually overkill - we don't need to sync the L1 itself, 14.265 // just everything involved in getting to this L1 (i.e. we need 14.266 // linear_pg_table[l1_linear_offset(va)] to be in sync)... 14.267 @@ -1925,7 +1923,7 @@ static int do_update_va_mapping(unsigned 14.268 * and what it uses to get/maintain that mapping. 14.269 * 14.270 * SHADOW MODE: none enable translate external 14.271 - * 14.272 + * 14.273 * 4KB things: 14.274 * guest_vtable lin_l2 mapped per gl2 lin_l2 via hl2 mapped per gl2 14.275 * shadow_vtable n/a sh_lin_l2 sh_lin_l2 mapped per gl2 14.276 @@ -1950,7 +1948,7 @@ static void shadow_update_pagetables(str 14.277 { 14.278 struct domain *d = v->domain; 14.279 #if defined (__x86_64__) 14.280 - unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)? 14.281 + unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)? 14.282 pagetable_get_pfn(v->arch.guest_table) : 14.283 pagetable_get_pfn(v->arch.guest_table_user)); 14.284 #else 14.285 @@ -2006,7 +2004,7 @@ static void shadow_update_pagetables(str 14.286 /* 14.287 * arch.shadow_vtable 14.288 */ 14.289 - if ( max_mode == SHM_external 14.290 + if ( max_mode == SHM_external 14.291 #if CONFIG_PAGING_LEVELS >=4 14.292 || max_mode & SHM_enable 14.293 #endif 14.294 @@ -2241,7 +2239,7 @@ static int check_pte( 14.295 page_table_page); 14.296 FAIL("RW2 coherence"); 14.297 } 14.298 - 14.299 + 14.300 if ( eff_guest_mfn == shadow_mfn ) 14.301 { 14.302 if ( level > 1 ) 14.303 @@ -2291,7 +2289,7 @@ static int check_l1_table( 14.304 errors += check_pte(v, p_guest+i, p_shadow+i, 14.305 p_snapshot ? 
p_snapshot+i : NULL, 14.306 1, l2_idx, i); 14.307 - 14.308 + 14.309 unmap_domain_page(p_shadow); 14.310 unmap_domain_page(p_guest); 14.311 if ( p_snapshot ) 14.312 @@ -2327,11 +2325,11 @@ static int check_l2_table( 14.313 14.314 #if 0 14.315 if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.316 - &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.317 + &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 14.318 ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) - 14.319 DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) ) 14.320 { 14.321 - for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE; 14.322 + for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE; 14.323 i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT); 14.324 i++ ) 14.325 printk("+++ (%d) %lx %lx\n",i, 14.326 @@ -2339,7 +2337,7 @@ static int check_l2_table( 14.327 FAILPT("hypervisor entries inconsistent"); 14.328 } 14.329 14.330 - if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) != 14.331 + if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) != 14.332 l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) ) 14.333 FAILPT("hypervisor linear map inconsistent"); 14.334 #endif 14.335 @@ -2399,7 +2397,7 @@ static int _check_pagetable(struct vcpu 14.336 { 14.337 struct domain *d = v->domain; 14.338 #if defined (__x86_64__) 14.339 - pagetable_t pt = ((v->arch.flags & TF_kernel_mode)? 14.340 + pagetable_t pt = ((v->arch.flags & TF_kernel_mode)? 14.341 pagetable_get_pfn(v->arch.guest_table) : 14.342 pagetable_get_pfn(v->arch.guest_table_user)); 14.343 #else 14.344 @@ -2434,7 +2432,7 @@ static int _check_pagetable(struct vcpu 14.345 oos_pdes = 1; 14.346 ASSERT(ptbase_mfn); 14.347 } 14.348 - 14.349 + 14.350 errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes); 14.351 14.352 gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn); 14.353 @@ -2565,7 +2563,6 @@ static unsigned long gva_to_gpa_pae(unsi 14.354 * The code is for 32-bit VMX gues on 64-bit host. 14.355 * To sync guest L2. 14.356 */ 14.357 - 14.358 static inline void 14.359 validate_bl2e_change( 14.360 struct domain *d, 14.361 @@ -2596,7 +2593,6 @@ validate_bl2e_change( 14.362 entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx])); 14.363 } 14.364 unmap_domain_page(sl2_p); 14.365 - 14.366 } 14.367 14.368 /* 14.369 @@ -2629,9 +2625,8 @@ static inline unsigned long init_bl2(l4_ 14.370 } 14.371 14.372 unmap_domain_page(spl4e); 14.373 + 14.374 return smfn; 14.375 - 14.376 - 14.377 } 14.378 14.379 static unsigned long shadow_l4_table( 14.380 @@ -2664,7 +2659,7 @@ static unsigned long shadow_l4_table( 14.381 * We could proactively fill in PDEs for pages that are already 14.382 * shadowed *and* where the guest PDE has _PAGE_ACCESSED set 14.383 * (restriction required for coherence of the accessed bit). However, 14.384 - * we tried it and it didn't help performance. This is simpler. 14.385 + * we tried it and it didn't help performance. This is simpler. 
14.386 */ 14.387 memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t)); 14.388 14.389 @@ -2757,7 +2752,7 @@ static int get_shadow_mfn(struct domain 14.390 } 14.391 } 14.392 14.393 -static void shadow_map_into_current(struct vcpu *v, 14.394 +static void shadow_map_into_current(struct vcpu *v, 14.395 unsigned long va, unsigned int from, unsigned int to) 14.396 { 14.397 pgentry_64_t gle, sle; 14.398 @@ -2768,7 +2763,7 @@ static void shadow_map_into_current(stru 14.399 return; 14.400 } 14.401 14.402 - __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to); 14.403 + __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to); 14.404 ASSERT(entry_get_flags(gle) & _PAGE_PRESENT); 14.405 gpfn = entry_get_pfn(gle); 14.406 14.407 @@ -2784,7 +2779,7 @@ static void shadow_map_into_current(stru 14.408 /* 14.409 * shadow_set_lxe should be put in shadow.h 14.410 */ 14.411 -static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 14.412 +static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 14.413 int create_l2_shadow, int put_ref_check) 14.414 { 14.415 struct vcpu *v = current; 14.416 @@ -2934,11 +2929,11 @@ static inline int l2e_rw_fault( 14.417 sl2e = l2e_empty(); 14.418 14.419 l1_mfn = ___shadow_status(d, start_gpfn | nx, PGT_fl1_shadow); 14.420 - 14.421 + 14.422 /* Check the corresponding l2e */ 14.423 if (l1_mfn) { 14.424 /* Why it is PRESENT?*/ 14.425 - if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) && 14.426 + if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) && 14.427 l2e_get_pfn(sl2e) == l1_mfn) { 14.428 ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn); 14.429 } else { 14.430 @@ -2985,7 +2980,7 @@ static inline int l2e_rw_fault( 14.431 sl1e = l1e_from_pfn(mfn, l2e_get_flags(tmp_l2e)); 14.432 14.433 if (!rw) { 14.434 - if ( shadow_mode_log_dirty(d) || 14.435 + if ( shadow_mode_log_dirty(d) || 14.436 !(l2e_get_flags(gl2e) & _PAGE_DIRTY) || mfn_is_page_table(mfn) ) 14.437 { 14.438 l1e_remove_flags(sl1e, _PAGE_RW); 14.439 @@ -3034,7 +3029,7 @@ static inline int l2e_rw_fault( 14.440 */ 14.441 #if defined( GUEST_PGENTRY_32 ) 14.442 static inline int guest_page_fault(struct vcpu *v, 14.443 - unsigned long va, unsigned int error_code, 14.444 + unsigned long va, unsigned int error_code, 14.445 guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e) 14.446 { 14.447 /* The following check for 32-bit guest on 64-bit host */ 14.448 @@ -3076,7 +3071,7 @@ static inline int guest_page_fault(struc 14.449 } 14.450 #else 14.451 static inline int guest_page_fault(struct vcpu *v, 14.452 - unsigned long va, unsigned int error_code, 14.453 + unsigned long va, unsigned int error_code, 14.454 guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e) 14.455 { 14.456 struct domain *d = v->domain; 14.457 @@ -3144,7 +3139,7 @@ static int shadow_fault_64(unsigned long 14.458 14.459 perfc_incrc(shadow_fault_calls); 14.460 14.461 - ESH_LOG("<shadow_fault_64> va=%lx, rip = %lx, error code = %x\n", 14.462 + ESH_LOG("<shadow_fault_64> va=%lx, rip = %lx, error code = %x\n", 14.463 va, regs->eip, regs->error_code); 14.464 14.465 /* 14.466 @@ -3166,12 +3161,12 @@ static int shadow_fault_64(unsigned long 14.467 v, va, regs->error_code, &gl2e, &gl1e) ) { 14.468 goto fail; 14.469 } 14.470 - 14.471 + 14.472 if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PSE)) ) { 14.473 /* 14.474 * Handle 4K pages here 14.475 */ 14.476 - 14.477 + 14.478 /* Write fault? 
*/ 14.479 if ( regs->error_code & 2 ) { 14.480 if ( !l1pte_write_fault(v, &gl1e, &sl1e, va) ) { 14.481 @@ -3194,7 +3189,7 @@ static int shadow_fault_64(unsigned long 14.482 */ 14.483 if ( unlikely(shadow_mode_log_dirty(d)) ) 14.484 __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e))); 14.485 - 14.486 + 14.487 } else { 14.488 /* 14.489 * Handle 2M pages here 14.490 @@ -3262,7 +3257,7 @@ static unsigned long gva_to_gpa_64(unsig 14.491 14.492 if (guest_page_fault(v, gva, 0, &gl2e, &gl1e)) 14.493 return 0; 14.494 - 14.495 + 14.496 if (guest_l2e_get_flags(gl2e) & _PAGE_PSE) 14.497 gpa = guest_l2e_get_paddr(gl2e) + (gva & ((1 << GUEST_L2_PAGETABLE_SHIFT) - 1)); 14.498 else
15.1 --- a/xen/arch/x86/vmx_platform.c Wed Nov 02 16:42:29 2005 +0100 15.2 +++ b/xen/arch/x86/vmx_platform.c Wed Nov 02 16:43:32 2005 +0100 15.3 @@ -303,20 +303,20 @@ static void init_instruction(struct inst 15.4 mmio_inst->flags = 0; 15.5 } 15.6 15.7 -#define GET_OP_SIZE_FOR_BYTE(op_size) \ 15.8 - do { \ 15.9 - if (rex) \ 15.10 - op_size = BYTE_64; \ 15.11 - else \ 15.12 - op_size = BYTE; \ 15.13 +#define GET_OP_SIZE_FOR_BYTE(op_size) \ 15.14 + do { \ 15.15 + if (rex) \ 15.16 + op_size = BYTE_64; \ 15.17 + else \ 15.18 + op_size = BYTE; \ 15.19 } while(0) 15.20 15.21 #define GET_OP_SIZE_FOR_NONEBYTE(op_size) \ 15.22 - do { \ 15.23 - if (rex & 0x8) \ 15.24 - op_size = QUAD; \ 15.25 - else if (op_size != WORD) \ 15.26 - op_size = LONG; \ 15.27 + do { \ 15.28 + if (rex & 0x8) \ 15.29 + op_size = QUAD; \ 15.30 + else if (op_size != WORD) \ 15.31 + op_size = LONG; \ 15.32 } while(0) 15.33 15.34 15.35 @@ -398,8 +398,9 @@ static int vmx_decode(unsigned char *opc 15.36 15.37 case 0x20: /* and r8, m8 */ 15.38 instr->instr = INSTR_AND; 15.39 - GET_OP_SIZE_FOR_BYTE(instr->op_size); 15.40 - return reg_mem(instr->op_size, opcode, instr, rex); 15.41 + instr->op_size = BYTE; 15.42 + GET_OP_SIZE_FOR_BYTE(size_reg); 15.43 + return reg_mem(size_reg, opcode, instr, rex); 15.44 15.45 case 0x21: /* and r32/16, m32/16 */ 15.46 instr->instr = INSTR_AND; 15.47 @@ -413,8 +414,9 @@ static int vmx_decode(unsigned char *opc 15.48 15.49 case 0x30: /* xor r8, m8 */ 15.50 instr->instr = INSTR_XOR; 15.51 - GET_OP_SIZE_FOR_BYTE(instr->op_size); 15.52 - return reg_mem(instr->op_size, opcode, instr, rex); 15.53 + instr->op_size = BYTE; 15.54 + GET_OP_SIZE_FOR_BYTE(size_reg); 15.55 + return reg_mem(size_reg, opcode, instr, rex); 15.56 15.57 case 0x31: /* xor r32/16, m32/16 */ 15.58 instr->instr = INSTR_XOR; 15.59 @@ -592,7 +594,7 @@ static int vmx_decode(unsigned char *opc 15.60 instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER); 15.61 return DECODE_success; 15.62 15.63 - case 0xB7: /* movz m16, r32 */ 15.64 + case 0xB7: /* movz m16/m32, r32/r64 */ 15.65 instr->instr = INSTR_MOVZ; 15.66 index = get_index(opcode + 1, rex); 15.67 if (rex & 0x8) { 15.68 @@ -689,9 +691,9 @@ static void mmio_operands(int type, unsi 15.69 struct mmio_op *mmio_opp, struct cpu_user_regs *regs) 15.70 { 15.71 unsigned long value = 0; 15.72 - int index, size; 15.73 + int index, size_reg; 15.74 15.75 - size = operand_size(inst->operand[0]); 15.76 + size_reg = operand_size(inst->operand[0]); 15.77 15.78 mmio_opp->flags = inst->flags; 15.79 mmio_opp->instr = inst->instr; 15.80 @@ -701,14 +703,17 @@ static void mmio_operands(int type, unsi 15.81 15.82 if (inst->operand[0] & REGISTER) { /* dest is memory */ 15.83 index = operand_index(inst->operand[0]); 15.84 - value = get_reg_value(size, index, 0, regs); 15.85 + value = get_reg_value(size_reg, index, 0, regs); 15.86 send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0); 15.87 } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */ 15.88 value = inst->immediate; 15.89 send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0); 15.90 } else if (inst->operand[0] & MEMORY) { /* dest is register */ 15.91 /* send the request and wait for the value */ 15.92 - send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0); 15.93 + if (inst->instr == INSTR_MOVZ) 15.94 + send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, 0); 15.95 + else 15.96 + send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0); 15.97 } else { 15.98 printf("mmio_operands: invalid operand\n"); 
15.99 domain_crash_synchronous();