ia64/xen-unstable

changeset 5314:fdf28002f13a

bitkeeper revision 1.1665.3.2 (42a0d0beY3Jku2hxgPBX6_mz23Ha3g)

[patch] libxc x86-64
I've redone the patch to add domU launching support to libxc for x86-64.

Signed-off-by: Jerone Young <jyoung5@us.ibm.com>
Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Fri Jun 03 21:50:54 2005 +0000 (2005-06-03)
parents 4e868aa7cad7
children c59632e7ff3e fea2f7f8df31
files tools/libxc/xc_linux_build.c tools/libxc/xc_private.h
line diff
     1.1 --- a/tools/libxc/xc_linux_build.c	Fri Jun 03 21:42:25 2005 +0000
     1.2 +++ b/tools/libxc/xc_linux_build.c	Fri Jun 03 21:50:54 2005 +0000
     1.3 @@ -3,13 +3,32 @@
     1.4   */
     1.5  
     1.6  #include "xc_private.h"
     1.7 +
     1.8 +#if defined(__i386__)
     1.9  #define ELFSIZE 32
    1.10 +#endif
    1.11 +
    1.12 +#if defined(__x86_64__)
    1.13 +#define ELFSIZE 64
    1.14 +#endif
    1.15 +
    1.16 +
    1.17  #include "xc_elf.h"
    1.18  #include <stdlib.h>
    1.19  #include <zlib.h>
    1.20  
    1.21 +#if defined(__i386__)
    1.22  #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
    1.23  #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    1.24 +#endif
    1.25 +
    1.26 +#if defined(__x86_64__)
    1.27 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
    1.28 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    1.29 +#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    1.30 +#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    1.31 +#endif
    1.32 +
    1.33  
    1.34  #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
    1.35  #define round_pgdown(_p)  ((_p)&PAGE_MASK)
    1.36 @@ -54,9 +73,17 @@ static int setup_guest(int xc_handle,
    1.37  {
    1.38      l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    1.39      l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    1.40 +#if defined(__x86_64__)
    1.41 +    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
    1.42 +    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
    1.43 +#endif
    1.44      unsigned long *page_array = NULL;
    1.45 -    unsigned long l2tab;
    1.46 -    unsigned long l1tab;
    1.47 +    unsigned long l2tab = 0;
    1.48 +    unsigned long l1tab = 0;
    1.49 +#if defined(__x86_64__)
    1.50 +    unsigned long l3tab = 0;
    1.51 +    unsigned long l4tab = 0;
    1.52 +#endif
    1.53      unsigned long count, i;
    1.54      start_info_t *start_info;
    1.55      shared_info_t *shared_info;
    1.56 @@ -111,30 +138,45 @@ static int setup_guest(int xc_handle,
    1.57          vstartinfo_end   = vstartinfo_start + PAGE_SIZE;
    1.58          vstack_start     = vstartinfo_end;
    1.59          vstack_end       = vstack_start + PAGE_SIZE;
    1.60 -        v_end            = (vstack_end + (1<<22)-1) & ~((1<<22)-1);
    1.61 -        if ( (v_end - vstack_end) < (512 << 10) )
    1.62 -            v_end += 1 << 22; /* Add extra 4MB to get >= 512kB padding. */
    1.63 +        v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
    1.64 +        if ( (v_end - vstack_end) < (512UL << 10) )
    1.65 +            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
    1.66 +#if defined(__i386__)
    1.67          if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >> 
    1.68                 L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
    1.69              break;
    1.70 +#endif
    1.71 +#if defined(__x86_64__)
    1.72 +#define NR(_l,_h,_s) \
    1.73 +    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
    1.74 +    ((_l) & ~((1UL<<(_s))-1))) >> (_s))
    1.75 +    if ( (1 + /* # L4 */
    1.76 +        NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
    1.77 +        NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
    1.78 +        NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
    1.79 +        <= nr_pt_pages )
    1.80 +            break;
    1.81 +#endif
    1.82      }
    1.83  
    1.84 +#define _p(a) ((void *) (a))
    1.85 +
    1.86      printf("VIRTUAL MEMORY ARRANGEMENT:\n"
    1.87 -           " Loaded kernel: %08lx->%08lx\n"
    1.88 -           " Init. ramdisk: %08lx->%08lx\n"
    1.89 -           " Phys-Mach map: %08lx->%08lx\n"
    1.90 -           " Page tables:   %08lx->%08lx\n"
    1.91 -           " Start info:    %08lx->%08lx\n"
    1.92 -           " Boot stack:    %08lx->%08lx\n"
    1.93 -           " TOTAL:         %08lx->%08lx\n",
    1.94 -           dsi.v_kernstart, dsi.v_kernend, 
    1.95 -           vinitrd_start, vinitrd_end,
    1.96 -           vphysmap_start, vphysmap_end,
    1.97 -           vpt_start, vpt_end,
    1.98 -           vstartinfo_start, vstartinfo_end,
    1.99 -           vstack_start, vstack_end,
   1.100 -           dsi.v_start, v_end);
   1.101 -    printf(" ENTRY ADDRESS: %08lx\n", dsi.v_kernentry);
   1.102 +           " Loaded kernel: %p->%p\n"
   1.103 +           " Init. ramdisk: %p->%p\n"
   1.104 +           " Phys-Mach map: %p->%p\n"
   1.105 +           " Page tables:   %p->%p\n"
   1.106 +           " Start info:    %p->%p\n"
   1.107 +           " Boot stack:    %p->%p\n"
   1.108 +           " TOTAL:         %p->%p\n",
   1.109 +           _p(dsi.v_kernstart), _p(dsi.v_kernend), 
   1.110 +           _p(vinitrd_start), _p(vinitrd_end),
   1.111 +           _p(vphysmap_start), _p(vphysmap_end),
   1.112 +           _p(vpt_start), _p(vpt_end),
   1.113 +           _p(vstartinfo_start), _p(vstartinfo_end),
   1.114 +           _p(vstack_start), _p(vstack_end),
   1.115 +           _p(dsi.v_start), _p(v_end));
   1.116 +    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
   1.117  
   1.118      if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
   1.119      {
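
The x86-64 branch of the page-table sizing check above uses the new NR(_l,_h,_s) helper, which counts how many 2^_s-aligned blocks the virtual range [_l,_h) touches; the three NR() terms are therefore the number of L1, L2 and L3 tables the guest image needs, plus the single L4 root. Below is a standalone sketch of that arithmetic, not part of the changeset: the base address and 16MB image size are hypothetical, and a 64-bit build is assumed so the 1UL shifts are well defined.

/* Sketch only -- not from the changeset.  Shift values match the
 * x86-64 definitions this patch adds to xc_private.h. */
#include <stdio.h>

#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39

/* Number of 2^_s-aligned blocks touched by the range [_l,_h). */
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
    ((_l) & ~((1UL<<(_s))-1))) >> (_s))

int main(void)
{
    unsigned long v_start = 0xffffffff80000000UL;   /* hypothetical base */
    unsigned long v_end   = v_start + (16UL << 20); /* hypothetical 16MB */

    printf("L1 tables: %lu\n", NR(v_start, v_end, L2_PAGETABLE_SHIFT));
    printf("L2 tables: %lu\n", NR(v_start, v_end, L3_PAGETABLE_SHIFT));
    printf("L3 tables: %lu\n", NR(v_start, v_end, L4_PAGETABLE_SHIFT));
    printf("L4 tables: 1\n"); /* root table, always exactly one */
    return 0;
}

For this hypothetical layout the sketch prints 8, 1, 1 and 1, so the sizing loop would only break once nr_pt_pages reaches at least 11.
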
   1.120 @@ -178,6 +220,7 @@ static int setup_guest(int xc_handle,
   1.121      if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
   1.122          goto error_out;
   1.123  
   1.124 +#if defined(__i386__)
   1.125      /* First allocate page for page dir. */
   1.126      ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
   1.127      l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.128 @@ -217,6 +260,74 @@ static int setup_guest(int xc_handle,
   1.129      }
   1.130      munmap(vl1tab, PAGE_SIZE);
   1.131      munmap(vl2tab, PAGE_SIZE);
   1.132 +#endif
   1.133 +#if defined(__x86_64__)
   1.134 +
   1.135 +#define alloc_pt(ltab, vltab) \
   1.136 +        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
   1.137 +        if (vltab != NULL) { \
   1.138 +            munmap(vltab, PAGE_SIZE); \
   1.139 +        } \
   1.140 +        if ((vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
   1.141 +                          PROT_READ|PROT_WRITE, \
   1.142 +                          ltab >> PAGE_SHIFT)) == NULL) { \
   1.143 +            munmap(vltab, PAGE_SIZE); \
   1.144 +            goto error_out; \
   1.145 +        } \
   1.146 +        memset(vltab, 0, PAGE_SIZE);
   1.147 +
   1.148 +    /* First allocate page for page dir. */
   1.149 +    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
   1.150 +    l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.151 +    ctxt->pt_base = l4tab;
   1.152 +    
    1.153 +    /* Initialize page table */
   1.154 +    if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.155 +                                        PROT_READ|PROT_WRITE,
   1.156 +                                        l4tab >> PAGE_SHIFT)) == NULL )
   1.157 +            goto error_out;
   1.158 +    memset(vl4tab, 0, PAGE_SIZE);
   1.159 +    vl4e = &vl4tab[l4_table_offset(dsi.v_start)];
   1.160 +    
   1.161 +    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++)
   1.162 +    {
   1.163 +        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
   1.164 +        {
   1.165 +            alloc_pt(l1tab, vl1tab);
   1.166 +            
   1.167 +                if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
   1.168 +                {
   1.169 +                    alloc_pt(l2tab, vl2tab);
   1.170 +                    if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
   1.171 +                    {
   1.172 +                        alloc_pt(l3tab, vl3tab);
   1.173 +                        vl3e = &vl3tab[l3_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
   1.174 +                        *vl4e = l3tab | L4_PROT;
   1.175 +                        vl4e++;
   1.176 +                    }
   1.177 +                    vl2e = &vl2tab[l2_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
   1.178 +                    *vl3e = l2tab | L3_PROT;
   1.179 +                    vl3e++;
   1.180 +                }
   1.181 +            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
   1.182 +            *vl2e = l1tab | L2_PROT;
   1.183 +            vl2e++;
   1.184 +        }
   1.185 +        
   1.186 +        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
   1.187 +        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
   1.188 +            (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) ) 
   1.189 +        {
   1.190 +                *vl1e &= ~_PAGE_RW;
   1.191 +        }
   1.192 +            vl1e++;
   1.193 +    }
   1.194 +     
   1.195 +    munmap(vl1tab, PAGE_SIZE);
   1.196 +    munmap(vl2tab, PAGE_SIZE);
   1.197 +    munmap(vl3tab, PAGE_SIZE);
   1.198 +    munmap(vl4tab, PAGE_SIZE);
   1.199 +#endif
   1.200  
   1.201      /* Write the phys->machine and machine->phys table entries. */
   1.202      physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
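
The x86-64 section above builds the four-level tree one guest page at a time: whenever the L1 entry pointer vl1e crosses onto a new page, the current L1 table is full (or does not exist yet) and alloc_pt() pulls the next frame from page_array, maps it and zeroes it; the same page-crossing test cascades upward to allocate L2 and L3 tables, while the L4 root is allocated once before the loop. Frames in the [vpt_start, vpt_end) range, which hold the page tables themselves, get _PAGE_RW cleared because Xen requires guest page tables to be mapped read-only. A simplified standalone sketch of the allocation pattern, which only counts tables instead of mapping real frames (addresses are hypothetical):

/* Sketch only -- not from the changeset. */
#include <stdio.h>

#define PAGE_SHIFT              12
#define L2_PAGETABLE_SHIFT      21   /* one L1 table covers 2MB   */
#define L3_PAGETABLE_SHIFT      30   /* one L2 table covers 1GB   */
#define L4_PAGETABLE_SHIFT      39   /* one L3 table covers 512GB */

int main(void)
{
    unsigned long v_start = 0xffffffff80000000UL;   /* hypothetical base */
    unsigned long v_end   = v_start + (16UL << 20); /* hypothetical 16MB */
    unsigned long addr, nl1 = 0, nl2 = 0, nl3 = 0;

    for ( addr = v_start; addr < v_end; addr += 1UL << PAGE_SHIFT )
    {
        /* New L1 table whenever we enter a new 2MB region... */
        if ( addr == v_start || !(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1)) )
        {
            nl1++;
            /* ...new L2 table for each new 1GB region... */
            if ( addr == v_start || !(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1)) )
            {
                nl2++;
                /* ...and a new L3 table for each new 512GB region. */
                if ( addr == v_start || !(addr & ((1UL << L4_PAGETABLE_SHIFT) - 1)) )
                    nl3++;
            }
        }
    }

    printf("page-table pages: 1 L4 + %lu L3 + %lu L2 + %lu L1\n",
           nl3, nl2, nl1);
    return 0;
}

For the same hypothetical 16MB image this prints 1 + 1 + 1 + 8 = 11 pages, matching the NR()-based sizing check earlier in setup_guest().
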
   1.203 @@ -243,13 +354,23 @@ static int setup_guest(int xc_handle,
   1.204      }
   1.205      munmap(physmap, PAGE_SIZE);
   1.206      
   1.207 +#if defined(__i386__)
   1.208      /*
   1.209       * Pin down l2tab addr as page dir page - causes hypervisor to provide
   1.210       * correct protection for the page
   1.211       */ 
   1.212      if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom) )
   1.213          goto error_out;
   1.214 +#endif
   1.215  
   1.216 +#if defined(__x86_64__)
   1.217 +    /*
   1.218 +     * Pin down l4tab addr as page dir page - causes hypervisor to  provide
   1.219 +     * correct protection for the page
   1.220 +     */
   1.221 +     if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE, l4tab>>PAGE_SHIFT, dom) )
   1.222 +        goto error_out;
   1.223 +#endif
   1.224      start_info = xc_map_foreign_range(
   1.225          xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   1.226          page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
   1.227 @@ -409,7 +530,7 @@ int xc_linux_build(int xc_handle,
   1.228      ctxt->user_regs.es = FLAT_KERNEL_DS;
   1.229      ctxt->user_regs.fs = FLAT_KERNEL_DS;
   1.230      ctxt->user_regs.gs = FLAT_KERNEL_DS;
   1.231 -    ctxt->user_regs.ss = FLAT_KERNEL_DS;
   1.232 +    ctxt->user_regs.ss = FLAT_KERNEL_SS;
   1.233      ctxt->user_regs.cs = FLAT_KERNEL_CS;
   1.234      ctxt->user_regs.eip = vkern_entry;
   1.235      ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
   1.236 @@ -433,7 +554,7 @@ int xc_linux_build(int xc_handle,
   1.237      ctxt->gdt_ents = 0;
   1.238  
   1.239      /* Ring 1 stack is the initial stack. */
   1.240 -    ctxt->kernel_ss = FLAT_KERNEL_DS;
   1.241 +    ctxt->kernel_ss = FLAT_KERNEL_SS;
   1.242      ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;
   1.243  
   1.244      /* No debugging. */
     2.1 --- a/tools/libxc/xc_private.h	Fri Jun 03 21:42:25 2005 +0000
     2.2 +++ b/tools/libxc/xc_private.h	Fri Jun 03 21:50:54 2005 +0000
     2.3 @@ -29,12 +29,25 @@
     2.4  #define _PAGE_PSE       0x080
     2.5  #define _PAGE_GLOBAL    0x100
     2.6  
     2.7 -
     2.8 +#if defined(__i386__)
     2.9  #define L1_PAGETABLE_SHIFT       12
    2.10  #define L2_PAGETABLE_SHIFT       22
    2.11 - 
    2.12 +#elif defined(__x86_64__)
    2.13 +#define L1_PAGETABLE_SHIFT      12
    2.14 +#define L2_PAGETABLE_SHIFT      21
    2.15 +#define L3_PAGETABLE_SHIFT      30
    2.16 +#define L4_PAGETABLE_SHIFT      39
    2.17 +#endif
    2.18 +
    2.19 +#if defined(__i386__) 
    2.20  #define ENTRIES_PER_L1_PAGETABLE 1024
    2.21  #define ENTRIES_PER_L2_PAGETABLE 1024
    2.22 +#elif defined(__x86_64__)
    2.23 +#define L1_PAGETABLE_ENTRIES    512
    2.24 +#define L2_PAGETABLE_ENTRIES    512
    2.25 +#define L3_PAGETABLE_ENTRIES    512
    2.26 +#define L4_PAGETABLE_ENTRIES    512
    2.27 +#endif
    2.28   
    2.29  #define PAGE_SHIFT              L1_PAGETABLE_SHIFT
    2.30  #define PAGE_SIZE               (1UL << PAGE_SHIFT)
    2.31 @@ -42,11 +55,26 @@
    2.32  
    2.33  typedef unsigned long l1_pgentry_t;
    2.34  typedef unsigned long l2_pgentry_t;
    2.35 +#if defined(__x86_64__)
    2.36 +typedef unsigned long l3_pgentry_t;
    2.37 +typedef unsigned long l4_pgentry_t;
    2.38 +#endif
    2.39  
    2.40 +#if defined(__i386__)
    2.41  #define l1_table_offset(_a) \
    2.42            (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
    2.43  #define l2_table_offset(_a) \
    2.44            ((_a) >> L2_PAGETABLE_SHIFT)
    2.45 +#elif defined(__x86_64__)
    2.46 +#define l1_table_offset(_a) \
    2.47 +  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
    2.48 +#define l2_table_offset(_a) \
    2.49 +  (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
    2.50 +#define l3_table_offset(_a) \
    2.51 +	(((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
    2.52 +#define l4_table_offset(_a) \
    2.53 +	(((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
    2.54 +#endif
    2.55  
    2.56  #define ERROR(_m, _a...)  \
    2.57      fprintf(stderr, "ERROR: " _m "\n" , ## _a )
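
The new x86-64 *_table_offset macros in xc_private.h slice a virtual address into four 9-bit indices: each level's table is one 4kB page holding 512 eight-byte entries, which is why the per-level entry counts drop from i386's 1024 to 512. A standalone sketch, not part of the changeset, showing the decomposition for a hypothetical guest address:

/* Sketch only -- macro definitions copied from the x86-64 half of
 * xc_private.h above; the address is hypothetical. */
#include <stdio.h>

#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39

#define L1_PAGETABLE_ENTRIES    512
#define L2_PAGETABLE_ENTRIES    512
#define L3_PAGETABLE_ENTRIES    512
#define L4_PAGETABLE_ENTRIES    512

#define l1_table_offset(_a) \
  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(_a) \
  (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(_a) \
  (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(_a) \
  (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))

int main(void)
{
    unsigned long va = 0xffffffff80123456UL;  /* hypothetical guest VA */

    printf("VA %#lx -> L4 %lu, L3 %lu, L2 %lu, L1 %lu, page offset %#lx\n",
           va,
           l4_table_offset(va), l3_table_offset(va),
           l2_table_offset(va), l1_table_offset(va),
           va & ((1UL << L1_PAGETABLE_SHIFT) - 1));
    return 0;
}

setup_guest() uses exactly these indices to pick the slot written through vl4e, vl3e, vl2e and vl1e for each page it maps.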