ia64/xen-unstable

changeset 16854:56307d5809cc

minios: make allocation of page tables for on-demand mapping lazy

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 23 11:20:01 2008 +0000 (2008-01-23)
parents 470e9dc0afc2
children bcae9d2cc2f8
files extras/mini-os/arch/x86/mm.c
line diff
     1.1 --- a/extras/mini-os/arch/x86/mm.c	Wed Jan 23 11:15:57 2008 +0000
     1.2 +++ b/extras/mini-os/arch/x86/mm.c	Wed Jan 23 11:20:01 2008 +0000
     1.3 @@ -366,93 +366,106 @@ void mem_test(unsigned long *start_add, 
     1.4  
     1.5  }
     1.6  
     1.7 -static pgentry_t *demand_map_pgt;
     1.8 -static void *demand_map_area_start;
     1.9 -#define DEMAND_MAP_PAGES 1024
    1.10 -
    1.11 -void arch_init_demand_mapping_area(unsigned long max_pfn)
    1.12 +static pgentry_t *get_pgt(unsigned long addr)
    1.13  {
    1.14      unsigned long mfn;
    1.15      pgentry_t *tab;
    1.16 -    unsigned long start_addr;
    1.17 +    unsigned offset;
    1.18 +
    1.19 +    tab = (pgentry_t *)start_info.pt_base;
    1.20 +    mfn = virt_to_mfn(start_info.pt_base);
    1.21 +
    1.22 +#if defined(__x86_64__)
    1.23 +    offset = l4_table_offset(addr);
    1.24 +    if (!(tab[offset] & _PAGE_PRESENT))
    1.25 +        return NULL;
    1.26 +    mfn = pte_to_mfn(tab[offset]);
    1.27 +    tab = mfn_to_virt(mfn);
    1.28 +#endif
    1.29 +#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    1.30 +    offset = l3_table_offset(addr);
    1.31 +    if (!(tab[offset] & _PAGE_PRESENT))
    1.32 +        return NULL;
    1.33 +    mfn = pte_to_mfn(tab[offset]);
    1.34 +    tab = mfn_to_virt(mfn);
    1.35 +#endif
    1.36 +    offset = l2_table_offset(addr);
    1.37 +    if (!(tab[offset] & _PAGE_PRESENT))
    1.38 +        return NULL;
    1.39 +    mfn = pte_to_mfn(tab[offset]);
    1.40 +    tab = mfn_to_virt(mfn);
    1.41 +    offset = l1_table_offset(addr);
    1.42 +    return &tab[offset];
    1.43 +}
    1.44 +
    1.45 +static pgentry_t *need_pgt(unsigned long addr)
    1.46 +{
    1.47 +    unsigned long mfn;
    1.48 +    pgentry_t *tab;
    1.49      unsigned long pt_pfn;
    1.50      unsigned offset;
    1.51  
    1.52 -    /* Round up to four megs.  + 1024 rather than + 1023 since we want
    1.53 -       to be sure we don't end up in the same place we started. */
    1.54 -    max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
    1.55 -    if (max_pfn == 0 ||
    1.56 -            (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
    1.57 -            HYPERVISOR_VIRT_START) {
    1.58 -        printk("Too much memory; no room for demand map hole.\n");
    1.59 -        do_exit();
    1.60 -    }
    1.61 -
    1.62 -    demand_map_area_start = pfn_to_virt(max_pfn);
    1.63 -    printk("Demand map pfns start at %lx (%p).\n", max_pfn,
    1.64 -            demand_map_area_start);
    1.65 -    start_addr = (unsigned long)demand_map_area_start;
    1.66 -
    1.67      tab = (pgentry_t *)start_info.pt_base;
    1.68      mfn = virt_to_mfn(start_info.pt_base);
    1.69 -    pt_pfn = virt_to_pfn(alloc_page());
    1.70  
    1.71  #if defined(__x86_64__)
    1.72 -    offset = l4_table_offset(start_addr);
    1.73 +    offset = l4_table_offset(addr);
    1.74      if (!(tab[offset] & _PAGE_PRESENT)) {
    1.75 +        pt_pfn = virt_to_pfn(alloc_page());
    1.76          new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
    1.77 -        pt_pfn = virt_to_pfn(alloc_page());
    1.78      }
    1.79      ASSERT(tab[offset] & _PAGE_PRESENT);
    1.80      mfn = pte_to_mfn(tab[offset]);
    1.81 -    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
    1.82 +    tab = mfn_to_virt(mfn);
    1.83  #endif
    1.84  #if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    1.85 -    offset = l3_table_offset(start_addr);
    1.86 +    offset = l3_table_offset(addr);
    1.87      if (!(tab[offset] & _PAGE_PRESENT)) {
    1.88 +        pt_pfn = virt_to_pfn(alloc_page());
    1.89          new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
    1.90 +    }
    1.91 +    ASSERT(tab[offset] & _PAGE_PRESENT);
    1.92 +    mfn = pte_to_mfn(tab[offset]);
    1.93 +    tab = mfn_to_virt(mfn);
    1.94 +#endif
    1.95 +    offset = l2_table_offset(addr);
    1.96 +    if (!(tab[offset] & _PAGE_PRESENT)) {
    1.97          pt_pfn = virt_to_pfn(alloc_page());
    1.98 +	new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    1.99      }
   1.100      ASSERT(tab[offset] & _PAGE_PRESENT);
   1.101      mfn = pte_to_mfn(tab[offset]);
   1.102 -    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
   1.103 +    tab = mfn_to_virt(mfn);
   1.104 +
   1.105 +    offset = l1_table_offset(addr);
   1.106 +    return &tab[offset];
   1.107 +}
   1.108 +
   1.109 +static unsigned long demand_map_area_start;
   1.110 +#ifdef __x86_64__
   1.111 +#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
   1.112 +#else
   1.113 +#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
   1.114  #endif
   1.115 -    offset = l2_table_offset(start_addr);
   1.116 -    if (tab[offset] & _PAGE_PRESENT) {
   1.117 -        printk("Demand map area already has a page table covering it?\n");
   1.118 -        BUG();
   1.119 -    }
   1.120 -    demand_map_pgt = pfn_to_virt(pt_pfn);
   1.121 -    new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
   1.122 -    ASSERT(tab[offset] & _PAGE_PRESENT);
   1.123 -    printk("Initialised demand area.\n");
   1.124 +
   1.125 +void arch_init_demand_mapping_area(unsigned long cur_pfn)
   1.126 +{
   1.127 +    cur_pfn++;
   1.128 +
   1.129 +    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
   1.130 +    cur_pfn += DEMAND_MAP_PAGES;
   1.131 +    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start, pfn_to_virt(cur_pfn));
   1.132  }
   1.133  
   1.134  #define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
   1.135 -
   1.136 -void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
   1.137 -	unsigned long increment, unsigned long alignment, domid_t id,
   1.138 -	int may_fail, unsigned long prot)
   1.139 +void do_map_frames(unsigned long addr,
   1.140 +        unsigned long *f, unsigned long n, unsigned long stride,
   1.141 +	unsigned long increment, domid_t id, int may_fail, unsigned long prot)
   1.142  {
   1.143 -    unsigned long x;
   1.144 -    unsigned long y = 0;
   1.145 -    int rc;
   1.146 +    pgentry_t *pgt = NULL;
   1.147      unsigned long done = 0;
   1.148 -
   1.149 -    /* Find a run of n contiguous frames */
   1.150 -    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
   1.151 -        for (y = 0; y < n; y++)
   1.152 -            if (demand_map_pgt[x+y] & _PAGE_PRESENT)
   1.153 -                break;
   1.154 -        if (y == n)
   1.155 -            break;
   1.156 -    }
   1.157 -    if (y != n) {
   1.158 -        printk("Failed to find %ld frames!\n", n);
   1.159 -        return NULL;
   1.160 -    }
   1.161 -
   1.162 -    /* Found it at x.  Map it in. */
   1.163 +    unsigned long i;
   1.164 +    int rc;
   1.165  
   1.166      while (done < n) {
   1.167  	unsigned long todo;
   1.168 @@ -468,9 +481,11 @@ void *map_frames_ex(unsigned long *f, un
   1.169  	{
   1.170  	    mmu_update_t mmu_updates[todo];
   1.171  
   1.172 -	    for (y = 0; y < todo; y++) {
   1.173 -		mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + done + y]);
   1.174 -		mmu_updates[y].val = ((f[(done + y) * stride] + (done + y) * increment) << PAGE_SHIFT) | prot;
   1.175 +	    for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
   1.176 +                if (!pgt || !(addr & L1_MASK))
   1.177 +                    pgt = need_pgt(addr);
   1.178 +		mmu_updates[i].ptr = virt_to_mach(pgt);
   1.179 +		mmu_updates[i].val = ((f[(done + i) * stride] + (done + i) * increment) << PAGE_SHIFT) | prot;
   1.180  	    }
   1.181  
   1.182  	    rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
   1.183 @@ -478,16 +493,48 @@ void *map_frames_ex(unsigned long *f, un
   1.184  		if (may_fail)
   1.185  		    f[done * stride] |= 0xF0000000;
   1.186  		else {
   1.187 -		    printk("Map %ld (%lx, ...) failed: %d.\n", todo, f[done * stride], rc);
   1.188 -		    return NULL;
   1.189 +		    printk("Map %ld (%lx, ...) at %p failed: %d.\n", todo, f[done * stride] + done * increment, addr, rc);
   1.190 +                    do_exit();
   1.191  		}
   1.192  	    }
   1.193  	}
   1.194  
   1.195  	done += todo;
   1.196      }
   1.197 -    return (void *)(unsigned long)((unsigned long)demand_map_area_start +
   1.198 -	    x * PAGE_SIZE);
   1.199 +}
   1.200 +
   1.201 +void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
   1.202 +	unsigned long increment, unsigned long alignment, domid_t id,
   1.203 +	int may_fail, unsigned long prot)
   1.204 +{
   1.205 +    unsigned long x;
   1.206 +    unsigned long y = 0;
   1.207 +
   1.208 +    /* Find a properly aligned run of n contiguous frames */
   1.209 +    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
   1.210 +        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
   1.211 +        pgentry_t *pgt = get_pgt(addr);
   1.212 +        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
   1.213 +            if (!(addr & L1_MASK))
   1.214 +                pgt = get_pgt(addr);
   1.215 +            if (pgt) {
   1.216 +                if (*pgt & _PAGE_PRESENT)
   1.217 +                    break;
   1.218 +                pgt++;
   1.219 +            }
   1.220 +        }
   1.221 +        if (y == n)
   1.222 +            break;
   1.223 +    }
   1.224 +    if (y != n) {
   1.225 +        printk("Failed to find %ld frames!\n", n);
   1.226 +        return NULL;
   1.227 +    }
   1.228 +
   1.229 +    /* Found it at x.  Map it in. */
   1.230 +    do_map_frames(demand_map_area_start + x * PAGE_SIZE, f, n, stride, increment, id, may_fail, prot);
   1.231 +
   1.232 +    return (void *)(unsigned long)(demand_map_area_start + x * PAGE_SIZE);
   1.233  }
   1.234  
   1.235  static void clear_bootstrap(void)