ia64/xen-unstable

xen/include/xen/mm.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 1c01814f9a25
/******************************************************************************
 * include/xen/mm.h
 *
 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
 *
 * Note that Xen must handle several different physical 'address spaces' and
 * there is a consistent terminology for these:
 *
 * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
 * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
 * 3. mfn/maddr:   A real machine frame number or address.
 * 4. pfn/paddr:   Used in 'polymorphic' functions that work across all
 *                 address spaces, depending on context. See the pagetable
 *                 conversion macros in asm-x86/page.h for examples.
 *                 Also 'paddr_t' is big enough to store any physical address.
 *
 * This scheme provides consistent function and variable names even when
 * different guests are running in different memory-management modes.
 * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
 *    will have gpfn == gmfn and gmfn != mfn.
 * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
 * 3. A paravirtualised guest with no pseudophysical overlay will have
 *    gpfn == gmfn == mfn.
 *
 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
 */

#ifndef __XEN_MM_H__
#define __XEN_MM_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>

struct domain;
struct page_info;

/* Boot-time allocator. Turns into generic allocator after bootstrap. */
paddr_t init_boot_allocator(paddr_t bitmap_start);
void init_boot_pages(paddr_t ps, paddr_t pe);
unsigned long alloc_boot_pages(
    unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);
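
/*
 * Example (illustrative sketch, not part of the original header): grabbing
 * one naturally aligned frame from the boot allocator during early bringup,
 * assuming a zero return signals failure:
 *
 *     unsigned long mfn = alloc_boot_pages(1, 1);
 *     if ( mfn == 0 )
 *         panic("out of boot pages");
 */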

/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(paddr_t ps, paddr_t pe);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
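
/*
 * Example (illustrative sketch): allocating and freeing a single Xen-heap
 * page; alloc_xenheap_pages() hands back a virtual address, or NULL on
 * failure:
 *
 *     void *v = alloc_xenheap_page();
 *     if ( v == NULL )
 *         return -ENOMEM;
 *     ...
 *     free_xenheap_page(v);
 */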

/* Domain suballocator. These functions are *not* interrupt-safe. */
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
unsigned long avail_domheap_pages_region(
    unsigned int node, unsigned int min_width, unsigned int max_width);
unsigned long avail_domheap_pages(void);
#define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
#define free_domheap_page(p) (free_domheap_pages(p,0))
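
/*
 * Example (illustrative sketch): an order-2 allocation gives 4 contiguous
 * pages accounted to domain d, returning NULL when the heap cannot satisfy
 * the request:
 *
 *     struct page_info *pg = alloc_domheap_pages(d, 2, 0);
 *     if ( pg == NULL )
 *         return -ENOMEM;
 *     ...
 *     free_domheap_pages(pg, 2);
 */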
unsigned int online_page(unsigned long mfn, uint32_t *status);
int offline_page(unsigned long mfn, int broken, uint32_t *status);
int query_page_offline(unsigned long mfn, uint32_t *status);

void scrub_heap_pages(void);

int assign_pages(
    struct domain *d,
    struct page_info *pg,
    unsigned int order,
    unsigned int memflags);
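
/*
 * Example (illustrative sketch): handing 2^order pre-allocated pages over
 * to domain d; a non-zero return means the domain could not accept them
 * (for instance because it is dying or over its allocation):
 *
 *     if ( assign_pages(d, pg, order, 0) != 0 )
 *         goto fail;
 */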

/* memflags: */
#define _MEMF_no_refcount 0
#define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
#define _MEMF_populate_on_demand 1
#define  MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
#define _MEMF_tmem        2
#define  MEMF_tmem        (1U<<_MEMF_tmem)
#define _MEMF_node        8
#define  MEMF_node(n)     ((((n)+1)&0xff)<<_MEMF_node)
#define _MEMF_bits        24
#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
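
/*
 * Example (illustrative sketch): memflags compose by OR. This requests a
 * page for d from NUMA node 0 with a machine address below 2^32; note that
 * MEMF_node() stores n+1 internally, so an all-zero memflags word means
 * "no node preference":
 *
 *     struct page_info *pg =
 *         alloc_domheap_page(d, MEMF_node(0) | MEMF_bits(32));
 */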

#ifdef CONFIG_PAGEALLOC_MAX_ORDER
#define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif

#define page_list_entry list_head

#include <asm/mm.h>
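
/*
 * Descriptive note (an inference, not in the original source): if the
 * architecture header supplies its own compact page_list_entry (e.g. one
 * that links pages by frame number rather than by pointer, to shrink
 * struct page_info), it is expected to #undef page_list_entry, selecting
 * the MFN-based implementation below; otherwise the generic
 * list_head-based definitions after the #else apply.
 */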

#ifndef page_list_entry
struct page_list_head
{
    struct page_info *next, *tail;
};
/* These must only have instances in struct page_info. */
# define page_list_entry

# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
# define PAGE_LIST_HEAD(name) \
    struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
# define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
# define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = ~0)

static inline int
page_list_empty(const struct page_list_head *head)
{
    return !head->next;
}
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return head->next;
}
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->tail ? mfn_to_page(page->list.next) : NULL;
}
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->next ? mfn_to_page(page->list.prev) : NULL;
}
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    if ( head->next )
    {
        page->list.next = page_to_mfn(head->next);
        head->next->list.prev = page_to_mfn(page);
    }
    else
    {
        head->tail = page;
        page->list.next = ~0;
    }
    page->list.prev = ~0;
    head->next = page;
}
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    page->list.next = ~0;
    if ( head->next )
    {
        page->list.prev = page_to_mfn(head->tail);
        head->tail->list.next = page_to_mfn(page);
    }
    else
    {
        page->list.prev = ~0;
        head->next = page;
    }
    head->tail = page;
}
static inline bool_t
__page_list_del_head(struct page_info *page, struct page_list_head *head,
                     struct page_info *next, struct page_info *prev)
{
    if ( head->next == page )
    {
        if ( head->tail != page )
        {
            next->list.prev = ~0;
            head->next = next;
        }
        else
            head->tail = head->next = NULL;
        return 1;
    }

    if ( head->tail == page )
    {
        prev->list.next = ~0;
        head->tail = prev;
        return 1;
    }

    return 0;
}
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    struct page_info *next = mfn_to_page(page->list.next);
    struct page_info *prev = mfn_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    struct page_info *next = mfn_to_page(page->list.next);
    struct page_info *prev = mfn_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head1, next, prev) &&
         !__page_list_del_head(page, head2, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline struct page_info *
page_list_remove_head(struct page_list_head *head)
{
    struct page_info *page = head->next;

    if ( page )
        page_list_del(page, head);

    return page;
}
static inline void
page_list_move(struct page_list_head *dst, struct page_list_head *src)
{
    if ( !page_list_empty(src) )
    {
        *dst = *src;
        INIT_PAGE_LIST_HEAD(src);
    }
}
static inline void
page_list_splice(struct page_list_head *list, struct page_list_head *head)
{
    struct page_info *first, *last, *at;

    if ( page_list_empty(list) )
        return;

    if ( page_list_empty(head) )
    {
        head->next = list->next;
        head->tail = list->tail;
        return;
    }

    first = list->next;
    last = list->tail;
    at = head->next;

    first->list.prev = page_to_mfn(head->next);
    head->next = first;

    last->list.next = page_to_mfn(at);
    at->list.prev = page_to_mfn(last);
}

#define page_list_for_each(pos, head) \
    for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
#define page_list_for_each_safe(pos, tmp, head) \
    for ( pos = (head)->next;    \
          pos ? (tmp = page_list_next(pos, head), 1) : 0; \
          pos = tmp )
#define page_list_for_each_safe_reverse(pos, tmp, head) \
    for ( pos = (head)->tail;    \
          pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
          pos = tmp )
#else
# define page_list_head                  list_head
# define PAGE_LIST_HEAD_INIT             LIST_HEAD_INIT
# define PAGE_LIST_HEAD                  LIST_HEAD
# define INIT_PAGE_LIST_HEAD             INIT_LIST_HEAD
# define INIT_PAGE_LIST_ENTRY            INIT_LIST_HEAD
# define page_list_empty                 list_empty
# define page_list_first(hd)             list_entry((hd)->next, \
                                                    struct page_info, list)
# define page_list_next(pg, hd)          list_entry((pg)->list.next, \
                                                    struct page_info, list)
# define page_list_add(pg, hd)           list_add(&(pg)->list, hd)
# define page_list_add_tail(pg, hd)      list_add_tail(&(pg)->list, hd)
# define page_list_del(pg, hd)           list_del(&(pg)->list)
# define page_list_del2(pg, hd1, hd2)    list_del(&(pg)->list)
# define page_list_remove_head(hd)       (!page_list_empty(hd) ? \
    ({ \
        struct page_info *__pg = page_list_first(hd); \
        list_del(&__pg->list); \
        __pg; \
    }) : NULL)
# define page_list_move(dst, src)        (!list_empty(src) ? \
    list_replace_init(src, dst) : (void)0)
# define page_list_for_each(pos, head)   list_for_each_entry(pos, head, list)
# define page_list_for_each_safe(pos, tmp, head) \
    list_for_each_entry_safe(pos, tmp, head, list)
# define page_list_for_each_safe_reverse(pos, tmp, head) \
    list_for_each_entry_safe_reverse(pos, tmp, head, list)
# define page_list_splice(list, hd)      list_splice(list, hd)
#endif
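
/*
 * Example (illustrative sketch): walking a page list while removing
 * entries; the _safe variant caches the successor first, so the current
 * page may be unlinked inside the loop body. should_reclaim() is a
 * hypothetical predicate:
 *
 *     struct page_info *pg, *tmp;
 *
 *     page_list_for_each_safe ( pg, tmp, &head )
 *     {
 *         if ( should_reclaim(pg) )
 *             page_list_del(pg, &head);
 *     }
 */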

/* Automatic page scrubbing for dead domains. */
extern struct page_list_head page_scrub_list;
#define page_scrub_schedule_work()                \
    do {                                          \
        if ( !page_list_empty(&page_scrub_list) ) \
            raise_softirq(PAGE_SCRUB_SOFTIRQ);    \
    } while ( 0 )
#define page_scrub_kick()                                              \
    do {                                                               \
        if ( !page_list_empty(&page_scrub_list) )                      \
            cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
    } while ( 0 )
void scrub_list_splice(struct page_list_head *);
void scrub_list_add(struct page_info *);
void scrub_one_page(struct page_info *);
unsigned long avail_scrub_pages(void);
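
/*
 * Example (illustrative sketch): queueing a dead domain's page for
 * asynchronous scrubbing, then prodding the softirq machinery so the
 * work actually gets picked up:
 *
 *     scrub_list_add(pg);
 *     page_scrub_schedule_work();
 */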

int guest_remove_page(struct domain *d, unsigned long gmfn);

#define RAM_TYPE_CONVENTIONAL 0x00000001
#define RAM_TYPE_RESERVED     0x00000002
#define RAM_TYPE_UNUSABLE     0x00000004
#define RAM_TYPE_ACPI         0x00000008
/* Returns TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
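
/*
 * Example (illustrative sketch): the type argument is a mask, so several
 * RAM types can be accepted at once; here a frame is rejected unless it
 * is conventional or reserved RAM:
 *
 *     if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL | RAM_TYPE_RESERVED) )
 *         return -EINVAL;
 */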

extern unsigned long *alloc_bitmap; /* for vmcoreinfo */

#endif /* __XEN_MM_H__ */