ia64/xen-unstable

changeset 9208:c176d2e45117

64 bit pagetable builder added to mm.c
Signed-off-by: Aravindh Puthiyaparambil
<aravindh.puthiyaparambil@unisys.com>
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
author kaf24@firebug.cl.cam.ac.uk
date Thu Mar 09 15:57:32 2006 +0100 (2006-03-09)
parents 80bc37d5a32f
children e0d32d7cb5da
files extras/mini-os/Makefile extras/mini-os/domain_config extras/mini-os/include/lib.h extras/mini-os/include/mm.h extras/mini-os/include/os.h extras/mini-os/mm.c extras/mini-os/traps.c
line diff
     1.1 --- a/extras/mini-os/Makefile	Thu Mar 09 15:56:12 2006 +0100
     1.2 +++ b/extras/mini-os/Makefile	Thu Mar 09 15:57:32 2006 +0100
     1.3 @@ -32,6 +32,7 @@ OBJS := $(TARGET_ARCH).o
     1.4  OBJS += $(patsubst %.c,%.o,$(wildcard *.c))
     1.5  OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
     1.6  OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
     1.7 +#OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
     1.8  										   
     1.9  HDRS := $(wildcard include/*.h)
    1.10  HDRS += $(wildcard include/xen/*.h)
     2.1 --- a/extras/mini-os/domain_config	Thu Mar 09 15:56:12 2006 +0100
     2.2 +++ b/extras/mini-os/domain_config	Thu Mar 09 15:57:32 2006 +0100
     2.3 @@ -15,3 +15,5 @@ memory = 32
     2.4  
     2.5  # A name for your domain. All domains must have different names.
     2.6  name = "Mini-OS"
     2.7 +
     2.8 +on_crash = 'destroy'
     3.1 --- a/extras/mini-os/include/lib.h	Thu Mar 09 15:56:12 2006 +0100
     3.2 +++ b/extras/mini-os/include/lib.h	Thu Mar 09 15:57:32 2006 +0100
     3.3 @@ -57,6 +57,7 @@
     3.4  
     3.5  #include <stdarg.h>
     3.6  
     3.7 +
     3.8  /* printing */
     3.9  #define printk  printf
    3.10  #define kprintf printf
     4.1 --- a/extras/mini-os/include/mm.h	Thu Mar 09 15:56:12 2006 +0100
     4.2 +++ b/extras/mini-os/include/mm.h	Thu Mar 09 15:57:32 2006 +0100
     4.3 @@ -25,18 +25,34 @@
     4.4  #ifndef _MM_H_
     4.5  #define _MM_H_
     4.6  
     4.7 -#ifdef __i386__
     4.8 +#if defined(__i386__)
     4.9  #include <xen/arch-x86_32.h>
    4.10 +#elif defined(__x86_64__)
    4.11 +#include <xen/arch-x86_64.h>
    4.12 +#else
    4.13 +#error "Unsupported architecture"
    4.14  #endif
    4.15  
    4.16 -#ifdef __x86_64__
    4.17 -#include <xen/arch-x86_64.h>
    4.18 -#endif
    4.19 +#include <lib.h>
    4.20  
    4.21 -
    4.22 -#ifdef __x86_64__
    4.23 +#define L1_FRAME                1
    4.24 +#define L2_FRAME                2
    4.25 +#define L3_FRAME                3
    4.26  
    4.27  #define L1_PAGETABLE_SHIFT      12
    4.28 +
    4.29 +#if defined(__i386__)
    4.30 +
    4.31 +#define L2_PAGETABLE_SHIFT      22
    4.32 +
    4.33 +#define L1_PAGETABLE_ENTRIES    1024
    4.34 +#define L2_PAGETABLE_ENTRIES    1024
    4.35 +
    4.36 +#define PADDR_BITS              32
    4.37 +#define PADDR_MASK              (~0UL)
    4.38 +
    4.39 +#elif defined(__x86_64__)
    4.40 +
    4.41  #define L2_PAGETABLE_SHIFT      21
    4.42  #define L3_PAGETABLE_SHIFT      30
    4.43  #define L4_PAGETABLE_SHIFT      39
    4.44 @@ -52,29 +68,29 @@
    4.45  #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
    4.46  #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
    4.47  
    4.48 -#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
    4.49 +/* Get physical address of page mapped by pte (paddr_t). */
    4.50 +#define l1e_get_paddr(x)           \
    4.51 +    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
    4.52 +#define l2e_get_paddr(x)           \
    4.53 +    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
    4.54 +#define l3e_get_paddr(x)           \
    4.55 +    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
    4.56 +#define l4e_get_paddr(x)           \
    4.57 +    ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
    4.58 +
    4.59 +#define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
    4.60 +#define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
    4.61  
    4.62  #endif
    4.63  
    4.64 -
    4.65 -
    4.66 -#ifdef __i386__
    4.67 -
    4.68 -#define L1_PAGETABLE_SHIFT      12
    4.69 -#define L2_PAGETABLE_SHIFT      22
    4.70 -
    4.71 -#define L1_PAGETABLE_ENTRIES    1024
    4.72 -#define L2_PAGETABLE_ENTRIES    1024
    4.73 -
    4.74 -#elif defined(__x86_64__)
    4.75 -#endif
    4.76 +#define L1_MASK  ((1UL << L2_PAGETABLE_SHIFT) - 1)
    4.77  
    4.78  /* Given a virtual address, get an entry offset into a page table. */
    4.79  #define l1_table_offset(_a) \
    4.80    (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
    4.81  #define l2_table_offset(_a) \
    4.82    (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
    4.83 -#ifdef __x86_64__
    4.84 +#if defined(__x86_64__)
    4.85  #define l3_table_offset(_a) \
    4.86    (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
    4.87  #define l4_table_offset(_a) \
    4.88 @@ -92,8 +108,15 @@
    4.89  #define _PAGE_PSE      0x080UL
    4.90  #define _PAGE_GLOBAL   0x100UL
    4.91  
    4.92 -#define L1_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
    4.93 -#define L2_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_USER)
    4.94 +#if defined(__i386__)
    4.95 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
    4.96 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
    4.97 +#elif defined(__x86_64__)
    4.98 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
    4.99 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
   4.100 +#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
   4.101 +#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
   4.102 +#endif
   4.103  
   4.104  #define PAGE_SIZE       (1UL << L1_PAGETABLE_SHIFT)
   4.105  #define PAGE_SHIFT      L1_PAGETABLE_SHIFT
   4.106 @@ -124,9 +147,9 @@ static __inline__ unsigned long machine_
   4.107      return phys;
   4.108  }
   4.109  
   4.110 -#ifdef __x86_64__
   4.111 +#if defined(__x86_64__)
   4.112  #define VIRT_START              0xFFFFFFFF00000000UL
   4.113 -#else
   4.114 +#elif defined(__i386__)
   4.115  #define VIRT_START              0xC0000000UL
   4.116  #endif
   4.117  
   4.118 @@ -136,6 +159,11 @@ static __inline__ unsigned long machine_
   4.119  #define virt_to_pfn(_virt)         (PFN_DOWN(to_phys(_virt)))
   4.120  #define mach_to_virt(_mach)        (to_virt(machine_to_phys(_mach)))
   4.121  #define mfn_to_virt(_mfn)          (mach_to_virt(_mfn << PAGE_SHIFT))
   4.122 +#define pfn_to_virt(_pfn)          (to_virt(_pfn << PAGE_SHIFT))
   4.123 +
   4.124 +/* Pagetable walking. */
   4.125 +#define pte_to_mfn(_pte)           (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
   4.126 +#define pte_to_virt(_pte)          to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
   4.127  
   4.128  void init_mm(void);
   4.129  unsigned long alloc_pages(int order);
     5.1 --- a/extras/mini-os/include/os.h	Thu Mar 09 15:56:12 2006 +0100
     5.2 +++ b/extras/mini-os/include/os.h	Thu Mar 09 15:57:32 2006 +0100
     5.3 @@ -59,6 +59,8 @@ extern shared_info_t *HYPERVISOR_shared_
     5.4  
     5.5  void trap_init(void);
     5.6  
     5.7 +
     5.8 +
     5.9  /* 
    5.10   * The use of 'barrier' in the following reflects their use as local-lock
    5.11   * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
     6.1 --- a/extras/mini-os/mm.c	Thu Mar 09 15:56:12 2006 +0100
     6.2 +++ b/extras/mini-os/mm.c	Thu Mar 09 15:57:32 2006 +0100
     6.3 @@ -51,7 +51,8 @@
     6.4  unsigned long *phys_to_machine_mapping;
     6.5  extern char *stack;
     6.6  extern char _text, _etext, _edata, _end;
     6.7 -
     6.8 +extern void do_exit(void);
     6.9 +extern void page_walk(unsigned long virt_addr);
    6.10  
    6.11  /*********************
    6.12   * ALLOCATION BITMAP
    6.13 @@ -64,7 +65,6 @@ static unsigned long *alloc_bitmap;
    6.14  #define allocated_in_map(_pn) \
    6.15  (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1<<((_pn)&(PAGES_PER_MAPWORD-1))))
    6.16  
    6.17 -
    6.18  /*
    6.19   * Hint regarding bitwise arithmetic in map_{alloc,free}:
    6.20   *  -(1<<n)  sets all bits >= n. 
    6.21 @@ -208,7 +208,6 @@ static void init_page_allocator(unsigned
    6.22      unsigned long range, bitmap_size;
    6.23      chunk_head_t *ch;
    6.24      chunk_tail_t *ct;
    6.25 -
    6.26      for ( i = 0; i < FREELIST_SIZE; i++ )
    6.27      {
    6.28          free_head[i]       = &free_tail[i];
    6.29 @@ -366,106 +365,181 @@ void free_pages(void *pointer, int order
    6.30      free_head[order] = freed_ch;   
    6.31     
    6.32  }
    6.33 -void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
    6.34 -{
    6.35 -    unsigned long pfn_to_map, pt_frame;
    6.36 -    unsigned long mach_ptd, max_mach_ptd;
    6.37 -    int count;
    6.38 -    unsigned long mach_pte, virt_pte;
    6.39 -    unsigned long *ptd = (unsigned long *)start_info.pt_base;
    6.40 -    mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    6.41 +
    6.42 +
    6.43 +void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn, 
    6.44 +                                unsigned long offset, unsigned long level)
    6.45 +{   
    6.46 +    unsigned long *tab = (unsigned long *)start_info.pt_base;
    6.47 +    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn); 
    6.48 +    unsigned long prot_e, prot_t, pincmd;
    6.49 +    mmu_update_t mmu_updates[1];
    6.50      struct mmuext_op pin_request;
    6.51      
    6.52 -    /* Firstly work out what is the first pfn that is not yet in page tables
    6.53 -       NB. Assuming that builder fills whole pt_frames (which it does at the
    6.54 -       moment)
    6.55 -     */  
    6.56 -    pfn_to_map = (start_info.nr_pt_frames - 1) * L1_PAGETABLE_ENTRIES;
    6.57 -    DEBUG("start_pfn=%ld, first pfn_to_map %ld, max_pfn=%ld", 
    6.58 -            *start_pfn, pfn_to_map, *max_pfn);
    6.59 +    DEBUG("Allocating new L%lu pt frame for pt_pfn=%lx, "
    6.60 +           "prev_l_mfn=%lx, offset=%lx\n", 
    6.61 +           level, *pt_pfn, prev_l_mfn, offset);
    6.62  
    6.63 -    /* Machine address of page table directory */
    6.64 -    mach_ptd = phys_to_machine(to_phys(start_info.pt_base));
    6.65 -    mach_ptd += sizeof(void *) * 
    6.66 -        l2_table_offset((unsigned long)to_virt(PFN_PHYS(pfn_to_map)));
    6.67 -  
    6.68 -    max_mach_ptd = sizeof(void *) * 
    6.69 -        l2_table_offset((unsigned long)to_virt(PFN_PHYS(*max_pfn)));
    6.70 -    
    6.71 -    /* Check that we are not trying to access Xen region */
    6.72 -    if(max_mach_ptd > sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START))
    6.73 +    if (level == L1_FRAME)
    6.74 +    {
    6.75 +         prot_e = L1_PROT;
    6.76 +         prot_t = L2_PROT;
    6.77 +         pincmd = MMUEXT_PIN_L1_TABLE;
    6.78 +    }
    6.79 +#if (defined __x86_64__)
    6.80 +    else if (level == L2_FRAME)
    6.81      {
    6.82 -        printk("WARNING: mini-os will not use all the memory supplied\n");
    6.83 -        max_mach_ptd = sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START);
    6.84 -        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
    6.85 +         prot_e = L2_PROT;
    6.86 +         prot_t = L3_PROT;
    6.87 +         pincmd = MMUEXT_PIN_L2_TABLE;
    6.88      }
    6.89 -    max_mach_ptd += phys_to_machine(to_phys(start_info.pt_base));
    6.90 -    DEBUG("Max_mach_ptd 0x%lx", max_mach_ptd); 
    6.91 -   
    6.92 -    pt_frame = *start_pfn;
    6.93 -    /* Should not happen - no empty, mapped pages */
    6.94 -    if(pt_frame >= pfn_to_map)
    6.95 +    else if (level == L3_FRAME)
    6.96      {
    6.97 -        printk("ERROR: Not even a single empty, mapped page\n");
    6.98 -        *(int*)0=0;
    6.99 +         prot_e = L3_PROT;
   6.100 +         prot_t = L4_PROT;
   6.101 +         pincmd = MMUEXT_PIN_L3_TABLE;
   6.102      }
   6.103 -    
   6.104 -    while(mach_ptd < max_mach_ptd)
   6.105 +#endif
   6.106 +    else
   6.107      {
   6.108 -        /* Correct protection needs to be set for the new page table frame */
   6.109 -        virt_pte = (unsigned long)to_virt(PFN_PHYS(pt_frame));
   6.110 -        mach_pte = ptd[l2_table_offset(virt_pte)] & ~(PAGE_SIZE-1);
   6.111 -        mach_pte += sizeof(void *) * l1_table_offset(virt_pte);
   6.112 -        DEBUG("New page table page: pfn=0x%lx, mfn=0x%lx, virt_pte=0x%lx, "
   6.113 -                "mach_pte=0x%lx", pt_frame, pfn_to_mfn(pt_frame), 
   6.114 -                virt_pte, mach_pte);
   6.115 -        
   6.116 -        /* Update the entry */
   6.117 -        mmu_updates[0].ptr = mach_pte;
   6.118 -        mmu_updates[0].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT | 
   6.119 -                                                    (L1_PROT & ~_PAGE_RW);
   6.120 -        if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
   6.121 -        {
   6.122 -            printk("PTE for new page table page could not be updated\n");
   6.123 -            *(int*)0=0;
   6.124 -        }
   6.125 -        
   6.126 -        /* Pin the page to provide correct protection */
   6.127 -        pin_request.cmd = MMUEXT_PIN_L1_TABLE;
   6.128 -        pin_request.arg1.mfn = pfn_to_mfn(pt_frame);
   6.129 -        if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
   6.130 -        {
   6.131 -            printk("ERROR: pinning failed\n");
   6.132 -            *(int*)0=0;
   6.133 -        }
   6.134 -        
   6.135 -        /* Now fill the new page table page with entries.
   6.136 -           Update the page directory as well. */
   6.137 -        count = 0;
   6.138 -        mmu_updates[count].ptr = mach_ptd;
   6.139 -        mmu_updates[count].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT |
   6.140 -                                                         L2_PROT;
   6.141 -        count++;
   6.142 -        mach_ptd += sizeof(void *);
   6.143 -        mach_pte = phys_to_machine(PFN_PHYS(pt_frame++));
   6.144 -        
   6.145 -        for(;count <= L1_PAGETABLE_ENTRIES && pfn_to_map <= *max_pfn; count++)
   6.146 -        {
   6.147 -            mmu_updates[count].ptr = mach_pte;
   6.148 -            mmu_updates[count].val = 
   6.149 -                pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
   6.150 -            if(count == 1) DEBUG("mach_pte 0x%lx", mach_pte);
   6.151 -            mach_pte += sizeof(void *);
   6.152 -        }
   6.153 -        if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0) 
   6.154 -        {            
   6.155 -            printk("ERROR: mmu_update failed\n");
   6.156 -            *(int*)0=0;
   6.157 -        }
   6.158 -        (*start_pfn)++;
    6.159 +         printk("new_pt_frame() called with invalid level number %lu\n", level);
   6.160 +         do_exit();
   6.161 +    }    
   6.162 +
   6.163 +    /* Update the entry */
   6.164 +#if (defined __x86_64__)
   6.165 +    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
   6.166 +    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
   6.167 +#endif
   6.168 +    mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) + 
   6.169 +                         sizeof(void *)* l1_table_offset(pt_page);
   6.170 +    mmu_updates[0].val = pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | 
   6.171 +                         (prot_e & ~_PAGE_RW);
   6.172 +    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
   6.173 +    {
   6.174 +         printk("PTE for new page table page could not be updated\n");
   6.175 +         do_exit();
   6.176 +    }
   6.177 +                        
   6.178 +    /* Pin the page to provide correct protection */
   6.179 +    pin_request.cmd = pincmd;
   6.180 +    pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
   6.181 +    if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
   6.182 +    {
   6.183 +        printk("ERROR: pinning failed\n");
   6.184 +        do_exit();
   6.185 +    }
   6.186 +
   6.187 +    /* Now fill the new page table page with entries.
   6.188 +       Update the page directory as well. */
   6.189 +    mmu_updates[0].ptr = (prev_l_mfn << PAGE_SHIFT) + sizeof(void *) * offset;
   6.190 +    mmu_updates[0].val = pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
   6.191 +    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0) 
   6.192 +    {            
   6.193 +       printk("ERROR: mmu_update failed\n");
   6.194 +       do_exit();
   6.195      }
   6.196  
   6.197 -    *start_pfn = pt_frame;
   6.198 +    *pt_pfn += 1;
   6.199 +}
   6.200 +
   6.201 +void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
   6.202 +{
   6.203 +    unsigned long start_address, end_address;
   6.204 +    unsigned long pfn_to_map, pt_pfn = *start_pfn;
   6.205 +    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
   6.206 +    unsigned long *tab = (unsigned long *)start_info.pt_base;
   6.207 +    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
   6.208 +    unsigned long page, offset;
   6.209 +    int count = 0;
   6.210 +
   6.211 +#if defined(__x86_64__)
   6.212 +    pfn_to_map = (start_info.nr_pt_frames - 3) * L1_PAGETABLE_ENTRIES;
   6.213 +#else
   6.214 +    pfn_to_map = (start_info.nr_pt_frames - 1) * L1_PAGETABLE_ENTRIES;
   6.215 +#endif
   6.216 +    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
   6.217 +    end_address = (unsigned long)pfn_to_virt(*max_pfn);
   6.218 +    
   6.219 +    /* We worked out the virtual memory range to map, now mapping loop */
   6.220 +    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
   6.221 +
   6.222 +    while(start_address < end_address)
   6.223 +    {
   6.224 +        tab = (unsigned long *)start_info.pt_base;
   6.225 +        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
   6.226 +
   6.227 +#if defined(__x86_64__)
   6.228 +        offset = l4_table_offset(start_address);
   6.229 +        /* Need new L3 pt frame */
   6.230 +        if(!(start_address & L3_MASK)) 
   6.231 +            new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
   6.232 +        
   6.233 +        page = tab[offset];
   6.234 +        mfn = pte_to_mfn(page);
   6.235 +        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
   6.236 +        offset = l3_table_offset(start_address);
   6.237 +        /* Need new L2 pt frame */
   6.238 +        if(!(start_address & L2_MASK)) 
   6.239 +            new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
   6.240 +
   6.241 +        page = tab[offset];
   6.242 +        mfn = pte_to_mfn(page);
   6.243 +        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
   6.244 +#endif
   6.245 +        offset = l2_table_offset(start_address);        
   6.246 +        /* Need new L1 pt frame */
   6.247 +        if(!(start_address & L1_MASK)) 
   6.248 +            new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
   6.249 +       
   6.250 +        page = tab[offset];
   6.251 +        mfn = pte_to_mfn(page);
   6.252 +        offset = l1_table_offset(start_address);
   6.253 +
   6.254 +        mmu_updates[count].ptr = (mfn << PAGE_SHIFT) + sizeof(void *) * offset;
   6.255 +        mmu_updates[count].val = 
   6.256 +            pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
   6.257 +        count++;
   6.258 +        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
   6.259 +        {
   6.260 +            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
   6.261 +            {
   6.262 +                printk("PTE could not be updated\n");
   6.263 +                do_exit();
   6.264 +            }
   6.265 +            count = 0;
   6.266 +        }
   6.267 +        start_address += PAGE_SIZE;
   6.268 +    }
   6.269 +
   6.270 +    *start_pfn = pt_pfn;
   6.271 +}
   6.272 +
   6.273 +
   6.274 +void mem_test(unsigned long *start_add, unsigned long *end_add)
   6.275 +{
   6.276 +    unsigned long mask = 0x10000;
   6.277 +    unsigned long *pointer;
   6.278 +
   6.279 +    for(pointer = start_add; pointer < end_add; pointer++)
   6.280 +    {
   6.281 +        if(!(((unsigned long)pointer) & 0xfffff))
   6.282 +        {
    6.283 +            printk("Writing to %lx\n", (unsigned long)pointer);
   6.284 +            page_walk((unsigned long)pointer);
   6.285 +        }
   6.286 +        *pointer = (unsigned long)pointer & ~mask;
   6.287 +    }
   6.288 +
   6.289 +    for(pointer = start_add; pointer < end_add; pointer++)
   6.290 +    {
   6.291 +        if(((unsigned long)pointer & ~mask) != *pointer)
   6.292 +            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
   6.293 +                (unsigned long)pointer, 
   6.294 +                *pointer, 
   6.295 +                ((unsigned long)pointer & ~mask));
   6.296 +    }
   6.297 +
   6.298  }
   6.299  
   6.300  void init_mm(void)
   6.301 @@ -485,23 +559,21 @@ void init_mm(void)
   6.302      phys_to_machine_mapping = (unsigned long *)start_info.mfn_list;
   6.303     
   6.304      /* First page follows page table pages and 3 more pages (store page etc) */
   6.305 -    start_pfn = PFN_UP(to_phys(start_info.pt_base)) + start_info.nr_pt_frames + 3;
   6.306 +    start_pfn = PFN_UP(to_phys(start_info.pt_base)) + 
   6.307 +                start_info.nr_pt_frames + 3;
   6.308      max_pfn = start_info.nr_pages;
   6.309 -
   6.310 +   
   6.311      printk("  start_pfn:    %lx\n", start_pfn);
   6.312      printk("  max_pfn:      %lx\n", max_pfn);
   6.313  
   6.314 -
   6.315 -#ifdef __i386__
   6.316      build_pagetable(&start_pfn, &max_pfn);
   6.317 -#endif
   6.318 -
   6.319 +    
   6.320      /*
   6.321       * now we can initialise the page allocator
   6.322       */
   6.323      printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n",
   6.324             (u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn), 
   6.325             (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn));
   6.326 -    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));   
   6.327 +    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
   6.328      printk("MM: done\n");
   6.329  }
     7.1 --- a/extras/mini-os/traps.c	Thu Mar 09 15:56:12 2006 +0100
     7.2 +++ b/extras/mini-os/traps.c	Thu Mar 09 15:57:32 2006 +0100
     7.3 @@ -70,6 +70,30 @@ DO_ERROR(12, "stack segment", stack_segm
     7.4  DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
     7.5  DO_ERROR(18, "machine check", machine_check)
     7.6  
     7.7 +void page_walk(unsigned long virt_address)
     7.8 +{
     7.9 +        unsigned long *tab = (unsigned long *)start_info.pt_base;
    7.10 +        unsigned long addr = virt_address, page;
    7.11 +        printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base);
    7.12 +    
    7.13 +#if defined(__x86_64__)
    7.14 +        page = tab[l4_table_offset(addr)];
    7.15 +        tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
     7.16 +        printk(" L4 = %lx (%p)  [offset = %lx]\n", page, tab, l4_table_offset(addr));
    7.17 +
    7.18 +        page = tab[l3_table_offset(addr)];
    7.19 +        tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
     7.20 +        printk("  L3 = %lx (%p)  [offset = %lx]\n", page, tab, l3_table_offset(addr));
    7.21 +#endif
    7.22 +        page = tab[l2_table_offset(addr)];
    7.23 +        tab =  to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
     7.24 +        printk("   L2 = %lx (%p)  [offset = %lx]\n", page, tab, l2_table_offset(addr));
    7.25 +        
    7.26 +        page = tab[l1_table_offset(addr)];
     7.27 +        printk("    L1 = %lx (%p)  [offset = %lx]\n", page, tab, l1_table_offset(addr));
    7.28 +
    7.29 +}
    7.30 +
    7.31  void do_page_fault(struct pt_regs *regs, unsigned long error_code,
    7.32  								                     unsigned long addr)
    7.33  {