ia64/xen-unstable

changeset 8204:114946d9937f

Merged.
author emellor@leeni.uk.xensource.com
date Fri Dec 02 22:27:04 2005 +0000 (2005-12-02)
parents b77ceb2481b3 ccb923727f93
children 5e617fd48504
files tools/examples/vif-nat xen/arch/x86/Makefile xen/arch/x86/boot/mkelf32.c xen/arch/x86/boot/x86_32.S xen/arch/x86/boot/x86_64.S xen/arch/x86/mm.c xen/arch/x86/x86_32/xen.lds xen/arch/x86/x86_64/xen.lds
     1.1 --- a/tools/examples/vif-nat	Fri Dec 02 15:57:24 2005 +0000
     1.2 +++ b/tools/examples/vif-nat	Fri Dec 02 22:27:04 2005 +0000
     1.3 @@ -91,8 +91,6 @@ vif_int=$(( $(echo "((($vif_ip" | sed -e
     1.4  netmask=$(dotted_quad $intmask)
     1.5  network=$(dotted_quad $(( $vif_int & $intmask )) )
     1.6  
     1.7 -main_ip=$(dom0_ip)
     1.8 -
     1.9  
    1.10  dhcp_remove_entry()
    1.11  {
    1.12 @@ -140,7 +138,7 @@ case "$command" in
    1.13  
    1.14          do_or_die ip link set "$vif" up arp on
    1.15          do_or_die ip addr add "$router_ip" dev "$vif"
    1.16 -        do_or_die ip route add "$vif_ip" dev "$vif" src "$main_ip"
    1.17 +        do_or_die ip route add "$vif_ip" dev "$vif" src "$router_ip"
    1.18          echo 1 >/proc/sys/net/ipv4/conf/${vif}/proxy_arp
    1.19          [ "$dhcp" != 'no' ] && dhcp_up
    1.20          ;;
     2.1 --- a/xen/arch/x86/Makefile	Fri Dec 02 15:57:24 2005 +0000
     2.2 +++ b/xen/arch/x86/Makefile	Fri Dec 02 22:27:04 2005 +0000
     2.3 @@ -37,7 +37,8 @@ endif
     2.4  default: $(TARGET)
     2.5  
     2.6  $(TARGET): $(TARGET)-syms boot/mkelf32
     2.7 -	./boot/mkelf32 $(TARGET)-syms $(TARGET) 0x100000
     2.8 +	./boot/mkelf32 $(TARGET)-syms $(TARGET) 0x100000 \
     2.9 +	`nm $(TARGET)-syms | sort | tail -n 1 | sed -e 's/^\([^ ]*\).*/0x\1/'`
    2.10  
    2.11  $(CURDIR)/arch.o: $(OBJS)
    2.12  	$(LD) $(LDFLAGS) -r -o $@ $(OBJS)
     3.1 --- a/xen/arch/x86/boot/mkelf32.c	Fri Dec 02 15:57:24 2005 +0000
     3.2 +++ b/xen/arch/x86/boot/mkelf32.c	Fri Dec 02 22:27:04 2005 +0000
     3.3 @@ -222,6 +222,7 @@ static void do_read(int fd, void *data, 
     3.4  
     3.5  int main(int argc, char **argv)
     3.6  {
     3.7 +    u64        final_exec_addr;
     3.8      u32        loadbase, dat_siz, mem_siz;
     3.9      char      *inimage, *outimage;
    3.10      int        infd, outfd;
    3.11 @@ -234,15 +235,17 @@ int main(int argc, char **argv)
    3.12      Elf64_Ehdr in64_ehdr;
    3.13      Elf64_Phdr in64_phdr;
    3.14  
    3.15 -    if ( argc != 4 )
    3.16 +    if ( argc != 5 )
    3.17      {
    3.18 -        fprintf(stderr, "Usage: mkelf32 <in-image> <out-image> <load-base>\n");
    3.19 +        fprintf(stderr, "Usage: mkelf32 <in-image> <out-image> "
    3.20 +                "<load-base> <final-exec-addr>\n");
    3.21          return 1;
    3.22      }
    3.23  
    3.24      inimage  = argv[1];
    3.25      outimage = argv[2];
    3.26      loadbase = strtoul(argv[3], NULL, 16);
    3.27 +    final_exec_addr = strtoul(argv[4], NULL, 16);
    3.28  
    3.29      infd = open(inimage, O_RDONLY);
    3.30      if ( infd == -1 )
    3.31 @@ -286,7 +289,10 @@ int main(int argc, char **argv)
    3.32  
    3.33          (void)lseek(infd, in32_phdr.p_offset, SEEK_SET);
    3.34          dat_siz = (u32)in32_phdr.p_filesz;
    3.35 -        mem_siz = (u32)in32_phdr.p_memsz;
    3.36 +
    3.37 +        /* Do not use p_memsz: it does not include BSS alignment padding. */
    3.38 +        /*mem_siz = (u32)in32_phdr.p_memsz;*/
    3.39 +        mem_siz = (u32)(final_exec_addr - in32_phdr.p_vaddr);
    3.40          break;
    3.41  
    3.42      case ELFCLASS64:
    3.43 @@ -314,7 +320,10 @@ int main(int argc, char **argv)
    3.44  
    3.45          (void)lseek(infd, in64_phdr.p_offset, SEEK_SET);
    3.46          dat_siz = (u32)in64_phdr.p_filesz;
    3.47 -        mem_siz = (u32)in64_phdr.p_memsz;
    3.48 +
    3.49 +        /* Do not use p_memsz: it does not include BSS alignment padding. */
    3.50 +        /*mem_siz = (u32)in64_phdr.p_memsz;*/
    3.51 +        mem_siz = (u32)(final_exec_addr - in64_phdr.p_vaddr);
    3.52          break;
    3.53  
    3.54      default:
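
The Makefile and mkelf32.c hunks above work together: the build now passes mkelf32 a fifth argument, the address of the highest symbol in the image (the last line of sorted `nm` output), and mkelf32 sizes the loadable segment as final_exec_addr - p_vaddr instead of trusting p_memsz, which, per the new comment, stops short of the BSS alignment padding. Below is a minimal sketch of that arithmetic; the addresses are made up for illustration, not taken from a real build.

    /*
     * Sketch of the new mem_siz computation in mkelf32. The segment is
     * loaded at p_vaddr, but the image now extends up to final_exec_addr
     * (the highest symbol reported by `nm`, which includes the
     * page-aligned BSS), so the advertised size must span that range.
     * All values below are hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t p_vaddr         = 0xff000000;  /* hypothetical segment start */
        uint64_t p_memsz         = 0x000fa000;  /* hypothetical p_memsz       */
        uint64_t final_exec_addr = 0xff100000;  /* hypothetical last symbol   */

        uint32_t old_mem_siz = (uint32_t)p_memsz;
        uint32_t new_mem_siz = (uint32_t)(final_exec_addr - p_vaddr);

        /* The new size covers the aligned BSS; the old one may fall short. */
        printf("old mem_siz = %#x, new mem_siz = %#x\n",
               (unsigned)old_mem_siz, (unsigned)new_mem_siz);
        return 0;
    }

The effect is that the in-memory footprint advertised to the loader now covers the alignment padding as well as the named BSS objects, which matters once the stacks and page tables move into .bss.page_aligned below.
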
     4.1 --- a/xen/arch/x86/boot/x86_32.S	Fri Dec 02 15:57:24 2005 +0000
     4.2 +++ b/xen/arch/x86/boot/x86_32.S	Fri Dec 02 22:27:04 2005 +0000
     4.3 @@ -74,10 +74,6 @@ 1:      lss     stack_start-__PAGE_OFFSE
     4.4          cmp     $0x2BADB002,%eax
     4.5          jne     not_multiboot
     4.6  
     4.7 -        /* Save the Multiboot info structure for later use. */
     4.8 -	add     $__PAGE_OFFSET,%ebx
     4.9 -        push    %ebx
    4.10 -
    4.11          /* Initialize BSS (no nasty surprises!) */
    4.12          mov     $__bss_start-__PAGE_OFFSET,%edi
    4.13          mov     $_end-__PAGE_OFFSET,%ecx
    4.14 @@ -85,6 +81,10 @@ 1:      lss     stack_start-__PAGE_OFFSE
    4.15          xor     %eax,%eax
    4.16          rep     stosb
    4.17  
    4.18 +        /* Save the Multiboot info structure for later use. */
    4.19 +        add     $__PAGE_OFFSET,%ebx
    4.20 +        push    %ebx
    4.21 +
    4.22  #ifdef CONFIG_X86_PAE
    4.23          /* Initialize low and high mappings of all memory with 2MB pages */
    4.24          mov     $idle_pg_table_l2-__PAGE_OFFSET,%edi
    4.25 @@ -238,27 +238,28 @@ ENTRY(gdt_table)
    4.26          .fill 2*NR_CPUS,8,0          /* space for TSS and LDT per CPU    */
    4.27  
    4.28          .org 0x2000
    4.29 -/* Maximum STACK_ORDER for x86/32 is 1. We must therefore ensure that the */
    4.30 -/* CPU0 stack is aligned on an even page boundary!                        */
    4.31 -ENTRY(cpu0_stack)
    4.32 -        .org 0x2000 + STACK_SIZE
    4.33  
    4.34  #ifdef CONFIG_X86_PAE
    4.35 -
    4.36  ENTRY(idle_pg_table)
    4.37  ENTRY(idle_pg_table_l3)
    4.38 -        .quad 0x100000 + 0x2000 + STACK_SIZE + 1*PAGE_SIZE + 0x01
    4.39 -        .quad 0x100000 + 0x2000 + STACK_SIZE + 2*PAGE_SIZE + 0x01
    4.40 -        .quad 0x100000 + 0x2000 + STACK_SIZE + 3*PAGE_SIZE + 0x01
    4.41 -        .quad 0x100000 + 0x2000 + STACK_SIZE + 4*PAGE_SIZE + 0x01
    4.42 -        .org 0x2000 + STACK_SIZE + 1*PAGE_SIZE
    4.43 +        .long idle_pg_table_l2 + 0*PAGE_SIZE + 0x01 - __PAGE_OFFSET, 0
    4.44 +        .long idle_pg_table_l2 + 1*PAGE_SIZE + 0x01 - __PAGE_OFFSET, 0
    4.45 +        .long idle_pg_table_l2 + 2*PAGE_SIZE + 0x01 - __PAGE_OFFSET, 0
    4.46 +        .long idle_pg_table_l2 + 3*PAGE_SIZE + 0x01 - __PAGE_OFFSET, 0
    4.47 +.section ".bss.page_aligned","w"
    4.48  ENTRY(idle_pg_table_l2)
    4.49 -        .org 0x2000 + STACK_SIZE + 5*PAGE_SIZE
    4.50 +        .fill 4*PAGE_SIZE,1,0
    4.51 +#else
    4.52 +.section ".bss.page_aligned","w"
    4.53 +ENTRY(idle_pg_table)
    4.54 +ENTRY(idle_pg_table_l2)
    4.55 +        .fill 1*PAGE_SIZE,1,0
    4.56 +#endif
    4.57  
    4.58 -#else /* CONFIG_X86_PAE */
    4.59 -
    4.60 -ENTRY(idle_pg_table)
    4.61 -ENTRY(idle_pg_table_l2) # Initial page directory is 4kB
    4.62 -        .org 0x2000 + STACK_SIZE + PAGE_SIZE
    4.63 -
    4.64 -#endif /* CONFIG_X86_PAE */
    4.65 +#if (STACK_ORDER == 0)
    4.66 +.section ".bss.page_aligned","w"
    4.67 +#else
    4.68 +.section ".bss.twopage_aligned","w"
    4.69 +#endif
    4.70 +ENTRY(cpu0_stack)
    4.71 +        .fill STACK_SIZE,1,0
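
This x86_32.S hunk replaces the hand-placed, `.org`-based CPU0 stack and idle page tables with real `.bss.page_aligned`/`.bss.twopage_aligned` sections; the removed comment records the constraint (STACK_ORDER can be 1 on x86/32, so the stack must start on an even page boundary). One common reason for aligning a stack to its own size is that the stack base can then be recovered by masking the stack pointer; the sketch below only illustrates that arithmetic with a hypothetical STACK_SIZE and pointer, and is not Xen's actual current-pointer code.

    /*
     * Illustration only: recovering the base of a STACK_SIZE-aligned
     * stack by masking an address within it. STACK_SIZE and the sample
     * stack pointer are hypothetical; this is not Xen's real
     * get_current()/get_cpu_info().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define STACK_ORDER 1u                      /* two-page stack, as on x86/32 */
    #define STACK_SIZE  (PAGE_SIZE << STACK_ORDER)

    int main(void)
    {
        uintptr_t sp   = 0xff11a7c4u;           /* hypothetical stack pointer */
        uintptr_t base = sp & ~((uintptr_t)STACK_SIZE - 1);

        /* Only valid if the stack really is aligned to STACK_SIZE. */
        printf("sp = %#lx, stack base = %#lx\n",
               (unsigned long)sp, (unsigned long)base);
        return 0;
    }
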
     5.1 --- a/xen/arch/x86/boot/x86_64.S	Fri Dec 02 15:57:24 2005 +0000
     5.2 +++ b/xen/arch/x86/boot/x86_64.S	Fri Dec 02 22:27:04 2005 +0000
     5.3 @@ -249,13 +249,8 @@ ENTRY(idle_pg_table_4)
     5.4  ENTRY(idle_pg_table_l3)
     5.5          .quad idle_pg_table_l2 - __PAGE_OFFSET + 7
     5.6  
     5.7 +/* Initial PDE -- level-2 page table. Maps first 64MB physical memory. */
     5.8          .org 0x4000
     5.9 -/* Maximum STACK_ORDER for x86/64 is 2. We must therefore ensure that the */
    5.10 -/* CPU0 stack is aligned on a 4-page boundary.                            */
    5.11 -ENTRY(cpu0_stack)
    5.12 -
    5.13 -/* Initial PDE -- level-2 page table. Maps first 64MB physical memory. */
    5.14 -        .org 0x4000 + STACK_SIZE
    5.15  ENTRY(idle_pg_table_l2)
    5.16          .macro identmap from=0, count=32
    5.17          .if \count-1
    5.18 @@ -265,7 +260,15 @@ ENTRY(idle_pg_table_l2)
    5.19          .quad 0x00000000000001e3 + \from
    5.20          .endif
    5.21          .endm
    5.22 -        identmap /* Too orangey for crows :-) */
    5.23 +        identmap
    5.24 +
    5.25 +        .org 0x4000 + PAGE_SIZE
    5.26 +        .code64
    5.27  
    5.28 -        .org 0x4000 + STACK_SIZE + PAGE_SIZE
    5.29 -        .code64
    5.30 +#if (STACK_ORDER == 0)
    5.31 +.section ".bss.page_aligned","w"
    5.32 +#else
    5.33 +.section ".bss.twopage_aligned","w"
    5.34 +#endif
    5.35 +ENTRY(cpu0_stack)
    5.36 +        .fill STACK_SIZE,1,0
     6.1 --- a/xen/arch/x86/mm.c	Fri Dec 02 15:57:24 2005 +0000
     6.2 +++ b/xen/arch/x86/mm.c	Fri Dec 02 22:27:04 2005 +0000
     6.3 @@ -128,8 +128,9 @@ static int mod_l1_entry(l1_pgentry_t *, 
     6.4  
     6.5  /* Used to defer flushing of memory structures. */
     6.6  static struct {
     6.7 -#define DOP_FLUSH_TLB   (1<<0) /* Flush the TLB.                 */
     6.8 -#define DOP_RELOAD_LDT  (1<<1) /* Reload the LDT shadow mapping. */
     6.9 +#define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
    6.10 +#define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
    6.11 +#define DOP_RELOAD_LDT     (1<<2) /* Reload the LDT shadow mapping.          */
    6.12      unsigned int   deferred_ops;
    6.13      /* If non-NULL, specifies a foreign subject domain for some operations. */
    6.14      struct domain *foreign;
    6.15 @@ -1323,14 +1324,28 @@ void free_page_type(struct pfn_info *pag
    6.16      struct domain *owner = page_get_owner(page);
    6.17      unsigned long gpfn;
    6.18  
    6.19 -    if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
    6.20 +    if ( likely(owner != NULL) )
    6.21      {
    6.22 -        mark_dirty(owner, page_to_pfn(page));
    6.23 -        if ( unlikely(shadow_mode_refcounts(owner)) )
    6.24 -            return;
    6.25 -        gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
    6.26 -        ASSERT(VALID_M2P(gpfn));
    6.27 -        remove_shadow(owner, gpfn, type & PGT_type_mask);
    6.28 +        /*
    6.29 +         * We have to flush before the next use of the linear mapping
    6.30 +         * (e.g., update_va_mapping()) or we could end up modifying a page
    6.31 +         * that is no longer a page table (and hence screw up ref counts).
    6.32 +         */
    6.33 +        percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
    6.34 +
    6.35 +        if ( unlikely(shadow_mode_enabled(owner)) )
    6.36 +        {
    6.37 +            /* Raw page tables are rewritten during save/restore. */
    6.38 +            if ( !shadow_mode_translate(owner) )
    6.39 +                mark_dirty(owner, page_to_pfn(page));
    6.40 +
    6.41 +            if ( shadow_mode_refcounts(owner) )
    6.42 +                return;
    6.43 +
    6.44 +            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
    6.45 +            ASSERT(VALID_M2P(gpfn));
    6.46 +            remove_shadow(owner, gpfn, type & PGT_type_mask);
    6.47 +        }
    6.48      }
    6.49  
    6.50      switch ( type & PGT_type_mask )
    6.51 @@ -1600,11 +1615,14 @@ static void process_deferred_ops(unsigne
    6.52      deferred_ops = percpu_info[cpu].deferred_ops;
    6.53      percpu_info[cpu].deferred_ops = 0;
    6.54  
    6.55 -    if ( deferred_ops & DOP_FLUSH_TLB )
    6.56 +    if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
    6.57      {
    6.58          if ( shadow_mode_enabled(d) )
    6.59              shadow_sync_all(d);
    6.60 -        local_flush_tlb();
    6.61 +        if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
    6.62 +            flush_tlb_mask(d->cpumask);
    6.63 +        else
    6.64 +            local_flush_tlb();
    6.65      }
    6.66          
    6.67      if ( deferred_ops & DOP_RELOAD_LDT )
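
The mm.c hunk adds a domain-wide flush to the deferred-ops machinery: free_page_type() now ORs DOP_FLUSH_ALL_TLBS into the per-CPU deferred_ops word, and process_deferred_ops() widens the flush from local_flush_tlb() to flush_tlb_mask(d->cpumask) when that bit is set. The sketch below shows the accumulate-then-dispatch pattern in isolation; the flush and reload functions are placeholders, not Xen's real primitives, and the shadow-mode handling is omitted.

    /*
     * Minimal sketch of the deferred-ops pattern: callers OR flags into a
     * word (per-CPU in the real code), and one dispatch point performs the
     * widest flush requested. The flag values match the hunk above; the
     * called functions are stand-ins.
     */
    #include <stdio.h>

    #define DOP_FLUSH_TLB      (1u << 0)  /* flush the local TLB only      */
    #define DOP_FLUSH_ALL_TLBS (1u << 1)  /* flush TLBs of every VCPU      */
    #define DOP_RELOAD_LDT     (1u << 2)  /* reload the LDT shadow mapping */

    static unsigned int deferred_ops;

    static void flush_local_tlb(void)     { puts("local TLB flush"); }
    static void flush_all_vcpu_tlbs(void) { puts("flush TLBs on all VCPUs"); }
    static void reload_ldt(void)          { puts("reload LDT"); }

    static void process_deferred_ops(void)
    {
        unsigned int ops = deferred_ops;
        deferred_ops = 0;

        if ( ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB) )
        {
            /* The domain-wide flush subsumes the local one. */
            if ( ops & DOP_FLUSH_ALL_TLBS )
                flush_all_vcpu_tlbs();
            else
                flush_local_tlb();
        }

        if ( ops & DOP_RELOAD_LDT )
            reload_ldt();
    }

    int main(void)
    {
        deferred_ops |= DOP_FLUSH_TLB;       /* e.g. an ordinary PTE update   */
        deferred_ops |= DOP_FLUSH_ALL_TLBS;  /* e.g. free_page_type() above   */
        process_deferred_ops();
        return 0;
    }

As the new comment in free_page_type() explains, the flush has to happen before the linear mapping is used again, and since the freed page table may still be cached in other VCPUs' TLBs, a local flush is not enough; hence the flush over d->cpumask.
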
     7.1 --- a/xen/arch/x86/x86_32/xen.lds	Fri Dec 02 15:57:24 2005 +0000
     7.2 +++ b/xen/arch/x86/x86_32/xen.lds	Fri Dec 02 22:27:04 2005 +0000
     7.3 @@ -23,7 +23,6 @@ SECTIONS
     7.4    _etext = .;			/* End of text section */
     7.5  
     7.6    .rodata : { *(.rodata) *(.rodata.*) } :text
     7.7 -  .kstrtab : { *(.kstrtab) } :text
     7.8  
     7.9    . = ALIGN(32);		/* Exception table */
    7.10    __start___ex_table = .;
    7.11 @@ -35,24 +34,11 @@ SECTIONS
    7.12    __pre_ex_table : { *(__pre_ex_table) } :text
    7.13    __stop___pre_ex_table = .;
    7.14  
    7.15 -  __start___ksymtab = .;	/* Kernel symbol table */
    7.16 -  __ksymtab : { *(__ksymtab) } :text
    7.17 -  __stop___ksymtab = .;
    7.18 -
    7.19 -  __start___kallsyms = .;	/* All kernel symbols */
    7.20 -  __kallsyms : { *(__kallsyms) } :text
    7.21 -  __stop___kallsyms = .;
    7.22 -
    7.23    .data : {			/* Data */
    7.24  	*(.data)
    7.25  	CONSTRUCTORS
    7.26  	} :text
    7.27  
    7.28 -  _edata = .;			/* End of data section */
    7.29 -
    7.30 -  . = ALIGN(8192);		/* init_task */
    7.31 -  .data.init_task : { *(.data.init_task) } :text
    7.32 -
    7.33    . = ALIGN(4096);		/* Init code and data */
    7.34    __init_begin = .;
    7.35    .text.init : { *(.text.init) } :text
    7.36 @@ -64,10 +50,13 @@ SECTIONS
    7.37    __initcall_start = .;
    7.38    .initcall.init : { *(.initcall.init) } :text
    7.39    __initcall_end = .;
    7.40 +  . = ALIGN(8192);
    7.41    __init_end = .;
    7.42  
    7.43    __bss_start = .;		/* BSS */
    7.44    .bss : {
    7.45 +	*(.bss.twopage_aligned)
    7.46 +	*(.bss.page_aligned)
    7.47  	*(.bss)
    7.48  	} :text
    7.49    _end = . ;
     8.1 --- a/xen/arch/x86/x86_64/xen.lds	Fri Dec 02 15:57:24 2005 +0000
     8.2 +++ b/xen/arch/x86/x86_64/xen.lds	Fri Dec 02 22:27:04 2005 +0000
     8.3 @@ -21,7 +21,6 @@ SECTIONS
     8.4    _etext = .;			/* End of text section */
     8.5  
     8.6    .rodata : { *(.rodata) *(.rodata.*) } :text
     8.7 -  .kstrtab : { *(.kstrtab) } :text
     8.8  
     8.9    . = ALIGN(32);		/* Exception table */
    8.10    __start___ex_table = .;
    8.11 @@ -33,24 +32,11 @@ SECTIONS
    8.12    __pre_ex_table : { *(__pre_ex_table) } :text
    8.13    __stop___pre_ex_table = .;
    8.14  
    8.15 -  __start___ksymtab = .;	/* Kernel symbol table */
    8.16 -  __ksymtab : { *(__ksymtab) } :text
    8.17 -  __stop___ksymtab = .;
    8.18 -
    8.19 -  __start___kallsyms = .;	/* All kernel symbols */
    8.20 -  __kallsyms : { *(__kallsyms) } :text
    8.21 -  __stop___kallsyms = .;
    8.22 -
    8.23    .data : {			/* Data */
    8.24  	*(.data)
    8.25  	CONSTRUCTORS
    8.26  	} :text
    8.27  
    8.28 -  _edata = .;			/* End of data section */
    8.29 -
    8.30 -  . = ALIGN(8192);		/* init_task */
    8.31 -  .data.init_task : { *(.data.init_task) } :text
    8.32 -
    8.33    . = ALIGN(4096);		/* Init code and data */
    8.34    __init_begin = .;
    8.35    .text.init : { *(.text.init) } :text
    8.36 @@ -62,10 +48,13 @@ SECTIONS
    8.37    __initcall_start = .;
    8.38    .initcall.init : { *(.initcall.init) } :text
    8.39    __initcall_end = .;
    8.40 +  . = ALIGN(8192);
    8.41    __init_end = .;
    8.42  
    8.43    __bss_start = .;		/* BSS */
    8.44    .bss : {
    8.45 +	*(.bss.twopage_aligned)
    8.46 +	*(.bss.page_aligned)
    8.47  	*(.bss)
    8.48  	} :text
    8.49    _end = . ;