ia64/xen-unstable: extras/mini-os/mm.c @ 19557:226ef307cd2e

/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: mm.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos
 *
 *        Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <os.h>
#include <hypervisor.h>
#include <xen/memory.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif

/*********************
 * ALLOCATION BITMAP
 *  One bit per page of memory. Bit set => page is allocated.
 */

static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

#define allocated_in_map(_pn) \
(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1UL<<((_pn)&(PAGES_PER_MAPWORD-1))))

/*
 * Hint regarding bitwise arithmetic in map_{alloc,free}:
 *  -(1<<n)  sets all bits >= n.
 *  (1<<n)-1 sets all bits <  n.
 * Variable names in map_{alloc,free}:
 *  *_idx == Index into `alloc_bitmap' array.
 *  *_off == Bit offset within an element of the `alloc_bitmap' array.
 */
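
/*
 * Worked example (added comment, not in the original source), assuming
 * 64-bit map words: marking pages [3,7) allocated within a single word
 * gives start_off=3 and end_off=7, so the single-word case below computes
 * ((1UL<<7)-1) & -(1UL<<3) == 0x78 -- exactly bits 3..6, i.e. pages
 * 3,4,5,6 become set.
 */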

static void map_alloc(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
    }
    else
    {
        alloc_bitmap[curr_idx] |= -(1UL<<start_off);
        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
        alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
    }
}
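
/* Added comment: clear the bitmap bits for pages [first_page, first_page+nr_pages). */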
static void map_free(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
    }
    else
    {
        alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
        alloc_bitmap[curr_idx] &= -(1UL<<end_off);
    }
}

/*************************
 * BINARY BUDDY ALLOCATOR
 */

typedef struct chunk_head_st chunk_head_t;
typedef struct chunk_tail_st chunk_tail_t;

struct chunk_head_st {
    chunk_head_t  *next;
    chunk_head_t **pprev;
    int            level;
};

struct chunk_tail_st {
    int level;
};
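
/*
 * Added note: free-chunk metadata lives in-band, inside the free memory
 * itself -- a chunk_head_t (doubly-linked free-list pointers plus the
 * chunk's order) at the start of each chunk, and a chunk_tail_t (order
 * only) at its end. FREELIST_SIZE below is the number of bits in a
 * pointer minus PAGE_SHIFT: one free list per possible power-of-two
 * chunk size.
 */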

/* Linked lists of free chunks of different powers-of-two in size. */
#define FREELIST_SIZE ((sizeof(void*)<<3)-PAGE_SHIFT)
static chunk_head_t *free_head[FREELIST_SIZE];
static chunk_head_t  free_tail[FREELIST_SIZE];
#define FREELIST_EMPTY(_l) ((_l)->next == NULL)

#define round_pgdown(_p)  ((_p)&PAGE_MASK)
#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)

#ifdef MM_DEBUG
/*
 * Prints allocation[0/1] for @nr_pages, starting at @start
 * address (virtual).
 */
USED static void print_allocation(void *start, int nr_pages)
{
    unsigned long pfn_start = virt_to_pfn(start);
    int count;

    for(count = 0; count < nr_pages; count++)
        if(allocated_in_map(pfn_start + count)) printk("1");
        else printk("0");

    printk("\n");
}

/*
 * Prints chunks (marking them with letters) for @nr_pages starting
 * at @start (virtual).
 */
USED static void print_chunks(void *start, int nr_pages)
{
    char chunks[1001], current='A';
    int order, count;
    chunk_head_t *head;
    unsigned long pfn_start = virt_to_pfn(start);

    memset(chunks, (int)'_', 1000);
    if(nr_pages > 1000)
    {
        DEBUG("Can only print 1000 pages. Increase buffer size.");
        nr_pages = 1000;  /* Added fix: clamp so chunks[nr_pages] below stays in bounds. */
    }

    for(order=0; order < FREELIST_SIZE; order++)
    {
        head = free_head[order];
        while(!FREELIST_EMPTY(head))
        {
            for(count = 0; count < 1UL << head->level; count++)
            {
                if(count + virt_to_pfn(head) - pfn_start < 1000)
                    chunks[count + virt_to_pfn(head) - pfn_start] = current;
            }
            head = head->next;
            current++;
        }
    }
    chunks[nr_pages] = '\0';
    printk("%s\n", chunks);
}
#endif

/*
 * Initialise allocator, placing addresses [@min,@max] in free pool.
 * @min and @max are PHYSICAL addresses.
 */
static void init_page_allocator(unsigned long min, unsigned long max)
{
    int i;
    unsigned long range, bitmap_size;
    chunk_head_t *ch;
    chunk_tail_t *ct;

    for ( i = 0; i < FREELIST_SIZE; i++ )
    {
        free_head[i]       = &free_tail[i];
        free_tail[i].pprev = &free_head[i];
        free_tail[i].next  = NULL;
    }

    min = round_pgup (min);
    max = round_pgdown(max);

    /* Allocate space for the allocation bitmap. */
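    /*
     * Added note: the bitmap needs one bit per page, so the span is
     * shifted by PAGE_SHIFT to count pages and by 3 more to convert bits
     * to bytes, then rounded up to a whole number of pages.
     */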
    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)to_virt(min);
    min         += bitmap_size;
    range        = max - min;

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);
    /* Free up the memory we've been given to play with. */
    map_free(PHYS_PFN(min), range>>PAGE_SHIFT);

    /* The buddy lists are addressed in high memory. */
    min = (unsigned long) to_virt(min);
    max = (unsigned long) to_virt(max);

    while ( range != 0 )
    {
        /*
         * Next chunk is limited by alignment of min, but also
         * must not be bigger than remaining range.
         */
        for ( i = PAGE_SHIFT; (1UL<<(i+1)) <= range; i++ )
            if ( min & (1UL<<i) ) break;

        ch = (chunk_head_t *)min;
        min   += (1UL<<i);
        range -= (1UL<<i);
        ct = (chunk_tail_t *)min-1;
        i -= PAGE_SHIFT;
        ch->level       = i;
        ch->next        = free_head[i];
        ch->pprev       = &free_head[i];
        ch->next->pprev = &ch->next;
        free_head[i]    = ch;
        ct->level       = i;
    }
}
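
/*
 * Added example: the carving loop above takes the largest aligned chunk
 * each round. With 4kB pages, min=0x3000 and range=0x5000 yields a 1-page
 * chunk at 0x3000 (bit 12 of min is set, so the scan stops at i=12), then
 * a 4-page chunk at 0x4000, exhausting the range.
 */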

/*
 * Allocate 2^@order contiguous pages. Returns a VIRTUAL address,
 * or 0 if no free chunk of at least that size exists.
 */
unsigned long alloc_pages(int order)
{
    int i;
    chunk_head_t *alloc_ch, *spare_ch;
    chunk_tail_t *spare_ct;

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i < FREELIST_SIZE; i++ ) {
        if ( !FREELIST_EMPTY(free_head[i]) )
            break;
    }

    if ( i == FREELIST_SIZE ) goto no_memory;

    /* Unlink a chunk. */
    alloc_ch = free_head[i];
    free_head[i] = alloc_ch->next;
    alloc_ch->next->pprev = alloc_ch->pprev;

    /* We may have to break the chunk a number of times. */
    while ( i != order )
    {
        /* Split into two equal parts. */
        i--;
        spare_ch = (chunk_head_t *)((char *)alloc_ch + (1UL<<(i+PAGE_SHIFT)));
        spare_ct = (chunk_tail_t *)((char *)spare_ch + (1UL<<(i+PAGE_SHIFT)))-1;

        /* Create new header for spare chunk. */
        spare_ch->level = i;
        spare_ch->next  = free_head[i];
        spare_ch->pprev = &free_head[i];
        spare_ct->level = i;

        /* Link in the spare chunk. */
        spare_ch->next->pprev = &spare_ch->next;
        free_head[i] = spare_ch;
    }

    map_alloc(PHYS_PFN(to_phys(alloc_ch)), 1UL<<order);

    return((unsigned long)alloc_ch);

 no_memory:

    printk("Cannot handle page request order %d!\n", order);

    return 0;
}
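
/*
 * Added note: free_pages coalesces the freed chunk with its buddy where
 * possible. Because every 2^k-page chunk handed out by this allocator is
 * 2^k-page aligned, bit (order+PAGE_SHIFT) of the chunk's address tells
 * whether its buddy lies just below (bit set) or just above (bit clear).
 */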
void free_pages(void *pointer, int order)
{
    chunk_head_t *freed_ch, *to_merge_ch;
    chunk_tail_t *freed_ct;
    unsigned long mask;

    /* First free the chunk */
    map_free(virt_to_pfn(pointer), 1UL << order);

    /* Create free chunk */
    freed_ch = (chunk_head_t *)pointer;
    freed_ct = (chunk_tail_t *)((char *)pointer + (1UL<<(order + PAGE_SHIFT)))-1;

    /* Now, possibly we can coalesce chunks together */
    while(order < FREELIST_SIZE)
    {
        mask = 1UL << (order + PAGE_SHIFT);
        if((unsigned long)freed_ch & mask)
        {
            to_merge_ch = (chunk_head_t *)((char *)freed_ch - mask);
            if(allocated_in_map(virt_to_pfn(to_merge_ch)) ||
               to_merge_ch->level != order)
                break;

            /* Merge with predecessor */
            freed_ch = to_merge_ch;
        }
        else
        {
            to_merge_ch = (chunk_head_t *)((char *)freed_ch + mask);
            if(allocated_in_map(virt_to_pfn(to_merge_ch)) ||
               to_merge_ch->level != order)
                break;

            /* Merge with successor */
            freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask) - 1;
        }

        /* We are committed to merging, unlink the chunk */
        *(to_merge_ch->pprev) = to_merge_ch->next;
        to_merge_ch->next->pprev = to_merge_ch->pprev;

        order++;
    }

    /* Link the new chunk */
    freed_ch->level = order;
    freed_ch->next  = free_head[order];
    freed_ch->pprev = &free_head[order];
    freed_ct->level = order;

    freed_ch->next->pprev = &freed_ch->next;
    free_head[order] = freed_ch;
}
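
/*
 * Added note: returns 'n' frames (machine frame numbers in 'mfns') to the
 * hypervisor via the XENMEM_decrease_reservation memory op; the frames are
 * then no longer backed for this domain.
 */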
#ifndef __ia64__
int free_physical_pages(xen_pfn_t *mfns, int n)
{
    struct xen_memory_reservation reservation;

    set_xen_guest_handle(reservation.extent_start, mfns);
    reservation.nr_extents = n;
    reservation.extent_order = 0;
    reservation.domid = DOMID_SELF;
    return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
#endif

#ifdef HAVE_LIBC
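/*
 * Added note: minimal sbrk() over the pre-reserved heap area. It advances
 * the break within [heap, heap_end], demand-mapping zeroed pages via
 * do_map_zero() as the break crosses into unmapped space, and returns the
 * old break, or NULL once the heap is exhausted.
 */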
void *sbrk(ptrdiff_t increment)
{
    unsigned long old_brk = brk;
    unsigned long new_brk = old_brk + increment;

    if (new_brk > heap_end) {
        printk("Heap exhausted: %lx + %lx = %lx > %lx\n",
               old_brk, (unsigned long)increment, new_brk, heap_end);
        return NULL;
    }

    if (new_brk > heap_mapped) {
        unsigned long n = (new_brk - heap_mapped + PAGE_SIZE - 1) / PAGE_SIZE;
        do_map_zero(heap_mapped, n);
        heap_mapped += n * PAGE_SIZE;
    }

    brk = new_brk;

    return (void *) old_brk;
}
#endif

void init_mm(void)
{
    unsigned long start_pfn, max_pfn;

    printk("MM: Init\n");

    arch_init_mm(&start_pfn, &max_pfn);
    /*
     * now we can initialise the page allocator
     */
    printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n",
           (u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn),
           (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn));
    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
    printk("MM: done\n");

    arch_init_p2m(max_pfn);

    arch_init_demand_mapping_area(max_pfn);
}

void fini_mm(void)
{
}
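
/*
 * Added note: debug consistency check -- every chunk on a free list must
 * be marked free in the allocation bitmap, and every free-list node's
 * back-pointer must point at the link that references it.
 */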
void sanity_check(void)
{
    int x;
    chunk_head_t *head;

    for (x = 0; x < FREELIST_SIZE; x++) {
        for (head = free_head[x]; !FREELIST_EMPTY(head); head = head->next) {
            ASSERT(!allocated_in_map(virt_to_pfn(head)));
            if (head->next)
                ASSERT(head->next->pprev == &head->next);
        }
        if (free_head[x]) {
            ASSERT(free_head[x]->pprev == &free_head[x]);
        }
    }
}