ia64/xen-unstable

annotate xen/include/xen/mm.h @ 18975:2090917489c5

PoD memory 7/9: Xen interface

Implement Xen interface to PoD functionality.
* Increase the number of MEMOP bits from 4 to 6 (increasing the number
of available memory operations from 16 to 64).
* Introduce XENMEMF_populate_on_demand, which will cause
populate_physmap() to fill a range with PoD entries rather than
backing it with RAM.
* Introduce XENMEM_[sg]et_pod_target operation to the memory
hypercall, to get and set PoD cache size. set_pod_target() should be
called during domain creation, as well as after modifying the memory
target of any domain which may have outstanding PoD entries.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jan 05 10:45:48 2009 +0000 (2009-01-05)
parents 8a0415fac759
children 2f993fde1dc6
rev   line source
kaf24@8726 1 /******************************************************************************
kaf24@8726 2 * include/xen/mm.h
kaf24@8726 3 *
kaf24@8726 4 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
kaf24@8726 5 *
kaf24@8726 6 * Note that Xen must handle several different physical 'address spaces' and
kaf24@8726 7 * there is a consistent terminology for these:
kaf24@8726 8 *
kaf24@8726 9 * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
kaf24@8726 10 * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
kaf24@8726 11 * 3. mfn/maddr: A real machine frame number or address.
kaf24@8726 12 * 4. pfn/paddr: Used in 'polymorphic' functions that work across all
kaf24@8726 13 * address spaces, depending on context. See the pagetable
kaf24@8726 14 * conversion macros in asm-x86/page.h for examples.
kaf24@8726 15 * Also 'paddr_t' is big enough to store any physical address.
kaf24@8726 16 *
kaf24@8726 17 * This scheme provides consistent function and variable names even when
kaf24@8726 18 * different guests are running in different memory-management modes.
kaf24@8726 19 * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
kaf24@8726 20 * will have gpfn == gmfn and gmfn != mfn.
kaf24@8726 21 * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
kaf24@8726 22 * 3. A paravirtualised guest with no pseudophysical overlay will have
kaf24@8726 23 * gpfn == gmfn == mfn.
kaf24@8726 24 *
kaf24@8726 25 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
kaf24@8726 26 */
kaf24@1210 27
kaf24@1211 28 #ifndef __XEN_MM_H__
kaf24@1211 29 #define __XEN_MM_H__
kaf24@1210 30
kaf24@4267 31 #include <xen/config.h>
kaf24@5398 32 #include <xen/types.h>
kaf24@4267 33 #include <xen/list.h>
kaf24@4267 34 #include <xen/spinlock.h>
kaf24@4267 35
kaf24@1941 36 struct domain;
kaf24@8726 37 struct page_info;
kaf24@1936 38
kaf24@3354 39 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
kaf24@8726 40 paddr_t init_boot_allocator(paddr_t bitmap_start);
kaf24@8726 41 void init_boot_pages(paddr_t ps, paddr_t pe);
kfraser@14083 42 unsigned long alloc_boot_pages(
kfraser@14083 43 unsigned long nr_pfns, unsigned long pfn_align);
kaf24@3354 44 void end_boot_allocator(void);
kaf24@3354 45
kaf24@2806 46 /* Xen suballocator. These functions are interrupt-safe. */
kaf24@8726 47 void init_xenheap_pages(paddr_t ps, paddr_t pe);
kaf24@5398 48 void *alloc_xenheap_pages(unsigned int order);
kaf24@5398 49 void free_xenheap_pages(void *v, unsigned int order);
kaf24@1920 50 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
kaf24@5398 51 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
kaf24@1749 52
kaf24@2806 53 /* Domain suballocator. These functions are *not* interrupt-safe. */
kaf24@8726 54 void init_domheap_pages(paddr_t ps, paddr_t pe);
kaf24@8726 55 struct page_info *alloc_domheap_pages(
kfraser@10418 56 struct domain *d, unsigned int order, unsigned int memflags);
kaf24@8726 57 void free_domheap_pages(struct page_info *pg, unsigned int order);
kfraser@15580 58 unsigned long avail_domheap_pages_region(
kfraser@15580 59 unsigned int node, unsigned int min_width, unsigned int max_width);
kaf24@1936 60 unsigned long avail_domheap_pages(void);
keir@17385 61 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
kaf24@5398 62 #define free_domheap_page(p) (free_domheap_pages(p,0))
kaf24@1210 63
kfraser@14098 64 void scrub_heap_pages(void);
kfraser@14098 65
kfraser@10418 66 int assign_pages(
kfraser@10418 67 struct domain *d,
kfraser@10418 68 struct page_info *pg,
kfraser@10418 69 unsigned int order,
kfraser@10418 70 unsigned int memflags);
kfraser@10418 71
kfraser@10418 72 /* memflags: */
kfraser@14103 73 #define _MEMF_no_refcount 0
kfraser@10418 74 #define MEMF_no_refcount (1U<<_MEMF_no_refcount)
keir@18975 75 #define _MEMF_populate_on_demand 1
keir@18975 76 #define MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
keir@17385 77 #define _MEMF_node 8
keir@17385 78 #define MEMF_node(n) ((((n)+1)&0xff)<<_MEMF_node)
kfraser@14103 79 #define _MEMF_bits 24
kfraser@14103 80 #define MEMF_bits(n) ((n)<<_MEMF_bits)
kaf24@5929 81
kaf24@10340 82 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
kaf24@10340 83 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
kaf24@10340 84 #else
kaf24@10341 85 #define MAX_ORDER 20 /* 2^20 contiguous pages */
kaf24@10340 86 #endif
kaf24@9456 87
kaf24@4267 88 /* Automatic page scrubbing for dead domains. */
kaf24@4267 89 extern struct list_head page_scrub_list;
kaf24@4267 90 #define page_scrub_schedule_work() \
kaf24@4267 91 do { \
kaf24@4267 92 if ( !list_empty(&page_scrub_list) ) \
kaf24@4267 93 raise_softirq(PAGE_SCRUB_SOFTIRQ); \
kaf24@4267 94 } while ( 0 )
kfraser@14340 95 #define page_scrub_kick() \
kfraser@14340 96 do { \
kfraser@14340 97 if ( !list_empty(&page_scrub_list) ) \
kfraser@14340 98 cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
kfraser@14340 99 } while ( 0 )
kaf24@10541 100 unsigned long avail_scrub_pages(void);
kaf24@4267 101
kaf24@1941 102 #include <asm/mm.h>
kaf24@1941 103
cl349@9211 104 int guest_remove_page(struct domain *d, unsigned long gmfn);
cl349@9211 105
kfraser@11177 106 /* Returns TRUE if the memory at address @p is ordinary RAM. */
kfraser@11177 107 int memory_is_conventional_ram(paddr_t p);
kfraser@11177 108
keir@17853 109 extern unsigned long *alloc_bitmap; /* for vmcoreinfo */
keir@17853 110
kaf24@1211 111 #endif /* __XEN_MM_H__ */