ia64/xen-unstable

changeset 3295:c65b544a8c4e

bitkeeper revision 1.1159.1.488 (41c1acbbhUN0iUWmupmPB85ghWRehg)

Many files:
x86/64 fixes.
author kaf24@pb001.cl.cam.ac.uk
date Thu Dec 16 15:41:47 2004 +0000 (2004-12-16)
parents cd853615e655
children 47157bca9ab0
files BitKeeper/etc/logging_ok xen/arch/x86/Makefile xen/arch/x86/boot/x86_64.S xen/arch/x86/domain.c xen/arch/x86/shadow.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/usercopy.c xen/include/asm-x86/config.h xen/include/asm-x86/mm.h xen/include/asm-x86/pda.h xen/include/asm-x86/processor.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_64/current.h
line diff
     1.1 --- a/BitKeeper/etc/logging_ok	Thu Dec 16 15:15:47 2004 +0000
     1.2 +++ b/BitKeeper/etc/logging_ok	Thu Dec 16 15:41:47 2004 +0000
     1.3 @@ -27,6 +27,7 @@ jws@cairnwell.research
     1.4  kaf24@camelot.eng.3leafnetworks.com
     1.5  kaf24@freefall.cl.cam.ac.uk
     1.6  kaf24@labyrinth.cl.cam.ac.uk
     1.7 +kaf24@pb001.cl.cam.ac.uk
     1.8  kaf24@penguin.local
     1.9  kaf24@plym.cl.cam.ac.uk
    1.10  kaf24@scramble.cl.cam.ac.uk
     2.1 --- a/xen/arch/x86/Makefile	Thu Dec 16 15:15:47 2004 +0000
     2.2 +++ b/xen/arch/x86/Makefile	Thu Dec 16 15:41:47 2004 +0000
     2.3 @@ -12,6 +12,12 @@ OBJS += $(patsubst %.c,%.o,$(wildcard mt
     2.4  
     2.5  OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
     2.6  
     2.7 +ifneq ($(TARGET_SUBARCH),i386)
     2.8 +OBJS := $(subst vmx.o,,$(OBJS))
     2.9 +OBJS := $(subst vmx_io.o,,$(OBJS))
    2.10 +OBJS := $(subst vmx_vmcs.o,,$(OBJS))
     2.11 +endif # ($(TARGET_SUBARCH),i386)
    2.12 +
    2.13  default: boot/$(TARGET_SUBARCH).o $(OBJS) boot/mkelf32
    2.14  	$(LD) $(LDFLAGS) -r -o arch.o $(OBJS)
    2.15  	$(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \
     3.1 --- a/xen/arch/x86/boot/x86_64.S	Thu Dec 16 15:15:47 2004 +0000
     3.2 +++ b/xen/arch/x86/boot/x86_64.S	Thu Dec 16 15:41:47 2004 +0000
     3.3 @@ -252,11 +252,14 @@ map_domain_mem:
     3.4  unmap_domain_mem:
     3.5  ret_from_intr:
     3.6  #undef machine_to_phys_mapping
     3.7 +#undef phys_to_machine_mapping
     3.8  .globl copy_to_user, set_intr_gate, die, machine_to_phys_mapping
     3.9 +.globl phys_to_machine_mapping
    3.10  copy_to_user:
    3.11  set_intr_gate:
    3.12  die:
    3.13  machine_to_phys_mapping:
    3.14 +phys_to_machine_mapping:
    3.15  .globl copy_from_user, show_registers, do_iopl
    3.16  copy_from_user: 
    3.17  show_registers: 
     4.1 --- a/xen/arch/x86/domain.c	Thu Dec 16 15:15:47 2004 +0000
     4.2 +++ b/xen/arch/x86/domain.c	Thu Dec 16 15:41:47 2004 +0000
     4.3 @@ -61,7 +61,7 @@ static void default_idle(void)
     4.4          __sti();
     4.5  }
     4.6  
     4.7 -static void idle_loop(void)
     4.8 +void idle_loop(void)
     4.9  {
    4.10      int cpu = smp_processor_id();
    4.11      for ( ; ; )
     5.1 --- a/xen/arch/x86/shadow.c	Thu Dec 16 15:15:47 2004 +0000
     5.2 +++ b/xen/arch/x86/shadow.c	Thu Dec 16 15:41:47 2004 +0000
     5.3 @@ -450,7 +450,7 @@ unsigned long shadow_l2_table(
     5.4  {
     5.5      struct pfn_info *spfn_info;
     5.6      unsigned long    spfn;
     5.7 -    l2_pgentry_t    *spl2e = 0, *gpl2e;
     5.8 +    l2_pgentry_t    *spl2e = 0;
     5.9      unsigned long guest_gpfn;
    5.10  
    5.11      __get_machine_to_phys(m, guest_gpfn, gpfn);
    5.12 @@ -471,17 +471,19 @@ unsigned long shadow_l2_table(
    5.13   
    5.14  #ifdef __i386__
    5.15      /* Install hypervisor and 2x linear p.t. mapings. */
    5.16 -    if (m->shadow_mode == SHM_full_32) 
    5.17 +    if ( m->shadow_mode == SHM_full_32 )
    5.18 +    {
    5.19          vmx_update_shadow_state(m, gpfn, spfn);
    5.20 -    else {
    5.21 +    }
    5.22 +    else
    5.23 +    {
    5.24          spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
    5.25 -        // can't use the linear map as we may not be in the right PT
    5.26 -        gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
    5.27          /*
    5.28 -         * We could proactively fill in PDEs for pages that are already shadowed.
    5.29 -         * However, we tried it and it didn't help performance. This is simpler.
    5.30 +         * We could proactively fill in PDEs for pages that are already
    5.31 +         * shadowed. However, we tried it and it didn't help performance.
    5.32 +         * This is simpler.
    5.33           */
    5.34 -        memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    5.35 +        memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
    5.36  
    5.37          /* Install hypervisor and 2x linear p.t. mapings. */
    5.38          memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    5.39 @@ -497,10 +499,8 @@ unsigned long shadow_l2_table(
    5.40      }
    5.41  #endif
    5.42  
    5.43 -    if (m->shadow_mode != SHM_full_32) 
    5.44 -    {                           
    5.45 +    if ( m->shadow_mode != SHM_full_32 ) 
    5.46          unmap_domain_mem(spl2e);
    5.47 -    }
    5.48  
    5.49      SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
    5.50      return spfn;
     6.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Dec 16 15:15:47 2004 +0000
     6.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Dec 16 15:41:47 2004 +0000
     6.3 @@ -38,14 +38,14 @@ void __dummy__(void)
     6.4      OFFSET(XREGS_ss, struct xen_regs, ss);
     6.5      BLANK();
     6.6  
     6.7 -    OFFSET(DOMAIN_processor, struct domain, processor);
     6.8 -    OFFSET(DOMAIN_shared_info, struct domain, shared_info);
     6.9 -    OFFSET(DOMAIN_event_sel, struct domain, thread.event_selector);
    6.10 -    OFFSET(DOMAIN_event_addr, struct domain, thread.event_address);
    6.11 -    OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector);
    6.12 -    OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address);
    6.13 -    OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce);
    6.14 -    OFFSET(DOMAIN_thread_flags, struct domain, thread.flags);
    6.15 +    OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    6.16 +    OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    6.17 +    OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector);
    6.18 +    OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address);
    6.19 +    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector);
    6.20 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address);
    6.21 +    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce);
    6.22 +    OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags);
    6.23      BLANK();
    6.24  
    6.25      OFFSET(SHINFO_upcall_pending, shared_info_t, 
     7.1 --- a/xen/arch/x86/x86_64/usercopy.c	Thu Dec 16 15:15:47 2004 +0000
     7.2 +++ b/xen/arch/x86/x86_64/usercopy.c	Thu Dec 16 15:41:47 2004 +0000
     7.3 @@ -88,7 +88,7 @@ unsigned long __clear_user(void *addr, u
     7.4  		"	.quad 1b,2b\n"
     7.5  		".previous"
     7.6  		: [size8] "=c"(size), [dst] "=&D" (__d0)
     7.7 -		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst] "(addr),
     7.8 +		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
     7.9  		  [zero] "r" (0UL), [eight] "r" (8UL));
    7.10  	return size;
    7.11  }
     8.1 --- a/xen/include/asm-x86/config.h	Thu Dec 16 15:15:47 2004 +0000
     8.2 +++ b/xen/include/asm-x86/config.h	Thu Dec 16 15:41:47 2004 +0000
     8.3 @@ -4,10 +4,13 @@
     8.4   * A Linux-style configuration list.
     8.5   */
     8.6  
     8.7 -#ifndef __XEN_I386_CONFIG_H__
     8.8 -#define __XEN_I386_CONFIG_H__
     8.9 +#ifndef __X86_CONFIG_H__
    8.10 +#define __X86_CONFIG_H__
    8.11  
    8.12 +#ifdef __i386__
    8.13  #define CONFIG_VMX 1
    8.14 +#endif
    8.15 +
    8.16  #define CONFIG_X86 1
    8.17  
    8.18  #define CONFIG_SMP 1
    8.19 @@ -228,4 +231,4 @@ extern unsigned long xenheap_phys_end; /
    8.20  #define ELFSIZE 32
    8.21  #endif
    8.22  
    8.23 -#endif /* __XEN_I386_CONFIG_H__ */
    8.24 +#endif /* __X86_CONFIG_H__ */
     9.1 --- a/xen/include/asm-x86/mm.h	Thu Dec 16 15:15:47 2004 +0000
     9.2 +++ b/xen/include/asm-x86/mm.h	Thu Dec 16 15:41:47 2004 +0000
     9.3 @@ -215,6 +215,7 @@ void synchronise_pagetables(unsigned lon
     9.4   * contiguous (or near contiguous) physical memory.
     9.5   */
     9.6  #undef  machine_to_phys_mapping
     9.7 +
     9.8  /*
     9.9   * The phys_to_machine_mapping is the reversed mapping of MPT for full
    9.10   * virtualization.
    9.11 @@ -223,12 +224,11 @@ void synchronise_pagetables(unsigned lon
    9.12  
    9.13  #ifdef __x86_64__
    9.14  extern unsigned long *machine_to_phys_mapping;
    9.15 +extern unsigned long *phys_to_machine_mapping;
    9.16  #else
    9.17  #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
    9.18 -#ifdef CONFIG_VMX
    9.19  #define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
    9.20  #endif
    9.21 -#endif
    9.22  
    9.23  #define DEFAULT_GDT_ENTRIES     (LAST_RESERVED_GDT_ENTRY+1)
    9.24  #define DEFAULT_GDT_ADDRESS     ((unsigned long)gdt_table)
    10.1 --- a/xen/include/asm-x86/pda.h	Thu Dec 16 15:15:47 2004 +0000
    10.2 +++ b/xen/include/asm-x86/pda.h	Thu Dec 16 15:41:47 2004 +0000
    10.3 @@ -9,7 +9,7 @@ struct x8664_pda {
    10.4  	unsigned long kernelstack;  /* TOS for current process */ 
    10.5  	unsigned long oldrsp; 	    /* user rsp for system call */
    10.6  	unsigned long irqrsp;	    /* Old rsp for interrupts. */ 
    10.7 -	struct domain *pcurrent;	/* Current process */
    10.8 +	struct exec_domain *pcurrent;	/* Current process */
    10.9          int irqcount;		    /* Irq nesting counter. Starts with -1 */  	
   10.10  	int cpunumber;		    /* Logical CPU number */
   10.11  	char *irqstackptr;	/* top of irqstack */
    11.1 --- a/xen/include/asm-x86/processor.h	Thu Dec 16 15:15:47 2004 +0000
    11.2 +++ b/xen/include/asm-x86/processor.h	Thu Dec 16 15:41:47 2004 +0000
    11.3 @@ -478,17 +478,12 @@ struct mm_struct {
    11.4      l1_pgentry_t *perdomain_ptes;
    11.5      pagetable_t  pagetable;
    11.6  
    11.7 -#ifdef CONFIG_VMX
    11.8 -
    11.9 -#define SHM_full_32     (8) /* full virtualization for 32-bit */
   11.10 -
   11.11 -        pagetable_t  monitor_table;
   11.12 -        l2_pgentry_t *vpagetable;	/* virtual address of pagetable */
   11.13 -        l2_pgentry_t *shadow_vtable;	/* virtual address of shadow_table */
   11.14 -        l2_pgentry_t *guest_pl2e_cache;	/* guest page directory cache */
   11.15 -        unsigned long min_pfn;		/* min host physical */
   11.16 -        unsigned long max_pfn;		/* max host physical */
   11.17 -#endif
   11.18 +    pagetable_t  monitor_table;
   11.19 +    l2_pgentry_t *vpagetable;	/* virtual address of pagetable */
   11.20 +    l2_pgentry_t *shadow_vtable;	/* virtual address of shadow_table */
   11.21 +    l2_pgentry_t *guest_pl2e_cache;	/* guest page directory cache */
   11.22 +    unsigned long min_pfn;		/* min host physical */
   11.23 +    unsigned long max_pfn;		/* max host physical */
   11.24  
   11.25      /* shadow mode status and controls */
   11.26      unsigned int shadow_mode;  /* flags to control shadow table operation */
    12.1 --- a/xen/include/asm-x86/x86_32/current.h	Thu Dec 16 15:15:47 2004 +0000
    12.2 +++ b/xen/include/asm-x86/x86_32/current.h	Thu Dec 16 15:41:47 2004 +0000
    12.3 @@ -6,20 +6,20 @@ struct domain;
    12.4  #define STACK_RESERVED \
    12.5      (sizeof(execution_context_t) + sizeof(struct domain *))
    12.6  
    12.7 -static inline struct exec_domain * get_current(void)
    12.8 +static inline struct exec_domain *get_current(void)
    12.9  {
   12.10 -    struct exec_domain *current;
   12.11 +    struct exec_domain *ed;
   12.12      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" 
   12.13 -              : "=r" (current) : "0" (STACK_SIZE-4) );
   12.14 -    return current;
   12.15 +              : "=r" (ed) : "0" (STACK_SIZE-4) );
   12.16 +    return ed;
   12.17  }
   12.18   
   12.19  #define current get_current()
   12.20  
   12.21 -static inline void set_current(struct exec_domain *p)
   12.22 +static inline void set_current(struct exec_domain *ed)
   12.23  {
   12.24      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" 
   12.25 -              : : "r" (STACK_SIZE-4), "r" (p) );    
   12.26 +              : : "r" (STACK_SIZE-4), "r" (ed) );    
   12.27  }
   12.28  
   12.29  static inline execution_context_t *get_execution_context(void)
    13.1 --- a/xen/include/asm-x86/x86_64/current.h	Thu Dec 16 15:15:47 2004 +0000
    13.2 +++ b/xen/include/asm-x86/x86_64/current.h	Thu Dec 16 15:41:47 2004 +0000
    13.3 @@ -9,18 +9,18 @@ struct domain;
    13.4  #define STACK_RESERVED \
    13.5      (sizeof(execution_context_t))
    13.6  
    13.7 -static inline struct domain * get_current(void)
    13.8 +static inline struct exec_domain *get_current(void)
    13.9  {
   13.10 -    struct domain *current;
   13.11 -    current = read_pda(pcurrent);
   13.12 -    return current;
   13.13 +    struct exec_domain *ed;
   13.14 +    ed = read_pda(pcurrent);
   13.15 +    return ed;
   13.16  }
   13.17   
   13.18  #define current get_current()
   13.19  
   13.20 -static inline void set_current(struct domain *p)
   13.21 +static inline void set_current(struct exec_domain *ed)
   13.22  {
   13.23 -    write_pda(pcurrent,p);
   13.24 +    write_pda(pcurrent, ed);
   13.25  }
   13.26  
   13.27  static inline execution_context_t *get_execution_context(void)