#include <asm/page.h>
static DEFINE_SPINLOCK(vm_lock);
-static void *__read_mostly vm_base;
-#define vm_bitmap ((unsigned long *)vm_base)
+static void *__read_mostly vm_base[VMAP_REGION_NR];
+#define vm_bitmap(x) ((unsigned long *)vm_base[x])
/* highest allocated bit in the bitmap */
-static unsigned int __read_mostly vm_top;
+static unsigned int __read_mostly vm_top[VMAP_REGION_NR];
/* total number of bits in the bitmap */
-static unsigned int __read_mostly vm_end;
+static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
/* lowest known clear bit in the bitmap */
-static unsigned int vm_low;
+static unsigned int vm_low[VMAP_REGION_NR];
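
The four arrays above are dimensioned by an `enum vmap_region` that this excerpt does not show; presumably it is added to xen/include/xen/vmap.h alongside the new declarations. A minimal sketch of what the code below requires:

    enum vmap_region {
        VMAP_DEFAULT,    /* the existing general-purpose region */
        VMAP_XEN,        /* the new Xen-private region */
        VMAP_REGION_NR,  /* array bound */
    };

VMAP_DEFAULT must come first: vm_free() and vunmap() below probe it before falling back to VMAP_XEN.
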
-void __init vm_init(void)
+void __init vm_init_type(enum vmap_region type, void *start, void *end)
{
unsigned int i, nr;
unsigned long va;
- vm_base = (void *)VMAP_VIRT_START;
- vm_end = PFN_DOWN(arch_vmap_virt_end() - vm_base);
- vm_low = PFN_UP((vm_end + 7) / 8);
- nr = PFN_UP((vm_low + 7) / 8);
- vm_top = nr * PAGE_SIZE * 8;
+ ASSERT(!vm_base[type]);
- for ( i = 0, va = (unsigned long)vm_bitmap; i < nr; ++i, va += PAGE_SIZE )
+ vm_base[type] = start;
+ vm_end[type] = PFN_DOWN(end - start);
+ vm_low[type] = PFN_UP((vm_end[type] + 7) / 8);
+ nr = PFN_UP((vm_low[type] + 7) / 8);
+ vm_top[type] = nr * PAGE_SIZE * 8;
+
+ for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr;
+       ++i, va += PAGE_SIZE )
{
struct page_info *pg = alloc_domheap_page(NULL, 0);
map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
clear_page((void *)va);
}
- bitmap_fill(vm_bitmap, vm_low);
+ bitmap_fill(vm_bitmap(type), vm_low[type]);
/* Populate page tables for the bitmap if necessary. */
- populate_pt_range(va, 0, vm_low - nr);
+ populate_pt_range(va, 0, vm_low[type] - nr);
}
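
With initialization parameterized this way, the old vm_init() entry point can be kept as a thin wrapper; a plausible sketch, assuming VMAP_VIRT_START and arch_vmap_virt_end() retain their current meaning:

    static inline void vm_init(void)
    {
        vm_init_type(VMAP_DEFAULT, (void *)VMAP_VIRT_START,
                     arch_vmap_virt_end());
    }

A second region would then be registered with a further vm_init_type(VMAP_XEN, start, end) call once its virtual range is chosen.
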
-void *vm_alloc(unsigned int nr, unsigned int align)
+static void *vm_alloc(unsigned int nr, unsigned int align,
+ enum vmap_region t)
{
unsigned int start, bit;
if ( !align )
align = 1;
else if ( align & (align - 1) )
align &= -align;
+ ASSERT((t >= VMAP_DEFAULT) && (t < VMAP_REGION_NR));
+ if ( !vm_base[t] )
+ return NULL;
+
spin_lock(&vm_lock);
for ( ; ; )
{
struct page_info *pg;
- ASSERT(vm_low == vm_top || !test_bit(vm_low, vm_bitmap));
- for ( start = vm_low; start < vm_top; )
+ ASSERT(vm_low[t] == vm_top[t] || !test_bit(vm_low[t], vm_bitmap(t)));
+ for ( start = vm_low[t]; start < vm_top[t]; )
{
- bit = find_next_bit(vm_bitmap, vm_top, start + 1);
- if ( bit > vm_top )
- bit = vm_top;
+ bit = find_next_bit(vm_bitmap(t), vm_top[t], start + 1);
+ if ( bit > vm_top[t] )
+ bit = vm_top[t];
/*
* Note that this skips the first bit, making the
* corresponding page a guard one.
*/
start = (start + align) & ~(align - 1);
- if ( bit < vm_top )
+ if ( bit < vm_top[t] )
{
if ( start + nr < bit )
break;
- start = find_next_zero_bit(vm_bitmap, vm_top, bit + 1);
+ start = find_next_zero_bit(vm_bitmap(t), vm_top[t], bit + 1);
}
else
{
if ( start + nr <= bit )
break;
start = bit;
}
}
- if ( start < vm_top )
+ if ( start < vm_top[t] )
break;
spin_unlock(&vm_lock);
- if ( vm_top >= vm_end )
+ if ( vm_top[t] >= vm_end[t] )
return NULL;
pg = alloc_domheap_page(NULL, 0);
if ( pg == NULL )
return NULL;
spin_lock(&vm_lock);
- if ( start >= vm_top )
+ if ( start >= vm_top[t] )
{
- unsigned long va = (unsigned long)vm_bitmap + vm_top / 8;
+ unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;
if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
{
clear_page((void *)va);
- vm_top += PAGE_SIZE * 8;
- if ( vm_top > vm_end )
- vm_top = vm_end;
+ vm_top[t] += PAGE_SIZE * 8;
+ if ( vm_top[t] > vm_end[t] )
+ vm_top[t] = vm_end[t];
continue;
}
}
free_domheap_page(pg);
- if ( start >= vm_top )
+ if ( start >= vm_top[t] )
{
spin_unlock(&vm_lock);
return NULL;
}
for ( bit = start; bit < start + nr; ++bit )
- __set_bit(bit, vm_bitmap);
- if ( bit < vm_top )
- ASSERT(!test_bit(bit, vm_bitmap));
+ __set_bit(bit, vm_bitmap(t));
+ if ( bit < vm_top[t] )
+ ASSERT(!test_bit(bit, vm_bitmap(t)));
else
- ASSERT(bit == vm_top);
- if ( start <= vm_low + 2 )
- vm_low = bit;
+ ASSERT(bit == vm_top[t]);
+ if ( start <= vm_low[t] + 2 )
+ vm_low[t] = bit;
spin_unlock(&vm_lock);
- return vm_base + start * PAGE_SIZE;
+ return vm_base[t] + start * PAGE_SIZE;
}
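
To make the rounding in the search loop concrete (illustrative numbers, not from the patch):

    /*
     * align = 4, start = 5:
     *   (5 + 4) & ~(4 - 1) == 9 & ~3 == 8
     * The candidate is rounded up to the next 4-page boundary, so bits
     * 5-7 stay clear; the skipped page is the guard page the in-code
     * comment refers to.
     */
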
-static unsigned int vm_index(const void *va)
+static unsigned int vm_index(const void *va, enum vmap_region type)
{
unsigned long addr = (unsigned long)va & ~(PAGE_SIZE - 1);
unsigned int idx;
+ unsigned long start = (unsigned long)vm_base[type];
+
+ if ( !start )
+ return 0;
- if ( addr < VMAP_VIRT_START + (vm_end / 8) ||
- addr >= VMAP_VIRT_START + vm_top * PAGE_SIZE )
+ if ( addr < start + (vm_end[type] / 8) ||
+ addr >= start + vm_top[type] * PAGE_SIZE )
return 0;
- idx = PFN_DOWN(va - vm_base);
- return !test_bit(idx - 1, vm_bitmap) &&
- test_bit(idx, vm_bitmap) ? idx : 0;
+ idx = PFN_DOWN(va - vm_base[type]);
+ return !test_bit(idx - 1, vm_bitmap(type)) &&
+ test_bit(idx, vm_bitmap(type)) ? idx : 0;
}
-static unsigned int vm_size(const void *va)
+static unsigned int vm_size(const void *va, enum vmap_region type)
{
- unsigned int start = vm_index(va), end;
+ unsigned int start = vm_index(va, type), end;
if ( !start )
return 0;
- end = find_next_zero_bit(vm_bitmap, vm_top, start + 1);
+ end = find_next_zero_bit(vm_bitmap(type), vm_top[type], start + 1);
- return min(end, vm_top) - start;
+ return min(end, vm_top[type]) - start;
}
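
A small worked example of how these two helpers decode the bitmap (hypothetical bit values):

    /*
     * Suppose bit 4 is clear (the guard) and bits 5-7 are set.  Then,
     * for va == vm_base[type] + 5 * PAGE_SIZE:
     *   vm_index(va, type) == 5  (clear bit followed by a set bit)
     *   vm_size(va, type)  == 3  (next zero bit is 8, giving 8 - 5 pages)
     */
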
-void vm_free(const void *va)
+static void vm_free(const void *va)
{
- unsigned int bit = vm_index(va);
+ enum vmap_region type = VMAP_DEFAULT;
+ unsigned int bit = vm_index(va, type);
+
+ if ( !bit )
+ {
+ type = VMAP_XEN;
+ bit = vm_index(va, type);
+ }
if ( !bit )
{
WARN_ON(va != NULL);
return;
}
spin_lock(&vm_lock);
- if ( bit < vm_low )
+ if ( bit < vm_low[type] )
{
- vm_low = bit - 1;
- while ( !test_bit(vm_low - 1, vm_bitmap) )
- --vm_low;
+ vm_low[type] = bit - 1;
+ while ( !test_bit(vm_low[type] - 1, vm_bitmap(type)) )
+ --vm_low[type];
}
- while ( __test_and_clear_bit(bit, vm_bitmap) )
- if ( ++bit == vm_top )
+ while ( __test_and_clear_bit(bit, vm_bitmap(type)) )
+ if ( ++bit == vm_top[type] )
break;
spin_unlock(&vm_lock);
}
void *__vmap(const mfn_t *mfn, unsigned int granularity,
- unsigned int nr, unsigned int align, unsigned int flags)
+ unsigned int nr, unsigned int align, unsigned int flags,
+ enum vmap_region type)
{
- void *va = vm_alloc(nr * granularity, align);
+ void *va = vm_alloc(nr * granularity, align, type);
unsigned long cur = (unsigned long)va;
for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
if ( map_pages_to_xen(cur, mfn_x(*mfn), granularity, flags) )
{
vunmap(va);
va = NULL;
}

return va;
}

void *vmap(const mfn_t *mfn, unsigned int nr)
{
- return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR);
+ return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
}
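
Existing callers are unaffected, since vmap() pins them to VMAP_DEFAULT. A usage sketch (hypothetical MFN values, error handling trimmed):

    mfn_t frames[2] = { _mfn(0x1234), _mfn(0x5678) };
    void *p = vmap(frames, ARRAY_SIZE(frames));

    if ( p )
    {
        /* Two virtually contiguous pages, mapped PAGE_HYPERVISOR. */
        vunmap(p);
    }
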
void vunmap(const void *va)
{
-#ifndef _PAGE_NONE
unsigned long addr = (unsigned long)va;
+ unsigned int pages = vm_size(va, VMAP_DEFAULT);
+
+ if ( !pages )
+ pages = vm_size(va, VMAP_XEN);
- destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
+#ifndef _PAGE_NONE
+ destroy_xen_mappings(addr, addr + PAGE_SIZE * pages);
#else /* Avoid tearing down intermediate page tables. */
- map_pages_to_xen((unsigned long)va, 0, vm_size(va), _PAGE_NONE);
+ map_pages_to_xen(addr, 0, pages, _PAGE_NONE);
#endif
vm_free(va);
}
-void *vmalloc(size_t size)
+static void *vmalloc_type(size_t size, enum vmap_region type)
{
mfn_t *mfn;
size_t pages, i;
mfn[i] = _mfn(page_to_mfn(pg));
}
- va = vmap(mfn, pages);
+ va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
if ( va == NULL )
goto error;

xfree(mfn);
return va;

error:
while ( i-- )
free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
xfree(mfn);
return NULL;
}
+void *vmalloc(size_t size)
+{
+ return vmalloc_type(size, VMAP_DEFAULT);
+}
+
+void *vmalloc_xen(size_t size)
+{
+ return vmalloc_type(size, VMAP_XEN);
+}
+
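
Allocation and freeing stay symmetric for both regions; a sketch of a VMAP_XEN user (size and the -ENOMEM convention are illustrative):

    void *payload = vmalloc_xen(size);

    if ( !payload )
        return -ENOMEM;
    /* ... populate payload ... */
    vfree(payload);

No type argument is needed on the free side: vfree(), like vunmap() above, probes VMAP_DEFAULT first and falls back to VMAP_XEN, as the hunk below shows.
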
void *vzalloc(size_t size)
{
- void *p = vmalloc(size);
+ void *p = vmalloc_type(size, VMAP_DEFAULT);
int i;
if ( p == NULL )
return NULL;

for ( i = 0; i < size; i += PAGE_SIZE )
clear_page(p + i);

return p;
}

void vfree(void *va)
{
unsigned int i, pages;
struct page_info *pg;
PAGE_LIST_HEAD(pg_list);
+ enum vmap_region type = VMAP_DEFAULT;
if ( !va )
return;
- pages = vm_size(va);
+ pages = vm_size(va, type);
+ if ( !pages )
+ {
+ type = VMAP_XEN;
+ pages = vm_size(va, type);
+ }
ASSERT(pages);
for ( i = 0; i < pages; i++ )