ia64/linux-2.6.18-xen.hg

view arch/x86_64/kernel/e820-xen.c @ 526:f128849f9e78

linux/x86: set up machine_e820 in copy_e820_map() for it to be usable
earlier (and also for being placed more logically).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 22 14:41:51 2008 +0100 (2008-04-22)
parents c8c9bc0b7e29
children 81c5a517a42b
line source
1 /*
2 * Handle the memory map.
3 * The functions here do the job until bootmem takes over.
4 *
5 * Getting sanitize_e820_map() in sync with i386 version by applying change:
6 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
7 * Alex Achenbach <xela@slit.de>, December 2002.
8 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
9 *
10 */
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/bootmem.h>
15 #include <linux/ioport.h>
16 #include <linux/string.h>
17 #include <linux/kexec.h>
18 #include <linux/module.h>
20 #include <asm/pgtable.h>
21 #include <asm/page.h>
22 #include <asm/e820.h>
23 #include <asm/proto.h>
24 #include <asm/bootsetup.h>
25 #include <asm/sections.h>
26 #include <xen/interface/memory.h>
28 /*
29 * PFN of last memory page.
30 */
31 unsigned long end_pfn;
32 EXPORT_SYMBOL(end_pfn);
34 /*
35 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
36 * The direct mapping extends to end_pfn_map, so that we can directly access
37 * apertures, ACPI and other tables without having to play with fixmaps.
38 */
39 unsigned long end_pfn_map;
41 /*
42 * Last pfn which the user wants to use.
43 */
44 unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
46 extern struct resource code_resource, data_resource;
/*
 * Check for some hardcoded bad areas that early boot is not allowed to touch.
 *
 * If the range [*addrp, *addrp + size) collides with a reserved region,
 * advance *addrp just past that region and return 1 so the caller can
 * retry at the new address (see the retry loop in find_e820_area()).
 * Returns 0 when the range is acceptable.
 */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

#ifndef CONFIG_XEN
	/* various gunk below that needed for SMP startup */
	if (addr < 0x8000) {
		*addrp = 0x8000;
		return 1;
	}

	/* direct mapping tables of the kernel */
	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
		*addrp = table_end << PAGE_SHIFT;
		return 1;
	}

	/* initrd */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
	    addr < INITRD_START+INITRD_SIZE) {
		*addrp = INITRD_START + INITRD_SIZE;
		return 1;
	}
#endif
	/* kernel code + 640k memory hole (later should not be needed, but
	   be paranoid for now) */
	if (last >= 640*1024 && addr < 1024*1024) {
		*addrp = 1024*1024;
		return 1;
	}
	/* kernel image itself */
	if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
		*addrp = __pa_symbol(&_end);
		return 1;
	}

	/* extended BIOS data area */
	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
		*addrp = ebda_addr + ebda_size;
		return 1;
	}

	/* XXX ramdisk image here? */
#else
	/*
	 * Under Xen only the kernel's direct-mapping page tables need to be
	 * avoided; everything below table_end is off limits.
	 */
	if (last < (table_end<<PAGE_SHIFT)) {
		*addrp = table_end << PAGE_SHIFT;
		return 1;
	}
#endif
	return 0;
}
/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.  A type of 0 matches entries of any type.
 *
 * NOTE: the #ifdef below deliberately opens the for-loop in both branches;
 * the shared loop body follows after the #endif.
 */
int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;

#ifndef CONFIG_XEN
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
#else
	/* Under Xen, consult the host (machine) e820 map instead. */
	extern struct e820map machine_e820;

	/* Only dom0 has a view of the machine memory map. */
	if (!is_initial_xendomain())
		return 0;
	for (i = 0; i < machine_e820.nr_map; i++) {
		const struct e820entry *ei = &machine_e820.map[i];
#endif

		if (type && ei->type != type)
			continue;
		/* skip entries that do not overlap [start, end) */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);
/*
 * This function checks if the entire range <start,end> is mapped with type.
 * A type of 0 matches entries of any type.
 *
 * Note: this function only works correct if the e820 table is sorted and
 * not-overlapping, which is the case (sanitize_e820_map() guarantees it).
 *
 * NOTE: as in e820_any_mapped(), the #ifdef opens the for-loop in both
 * branches; the shared loop body follows the #endif.
 */
int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;

#ifndef CONFIG_XEN
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
#else
	/* Under Xen, dom0 checks against the host (machine) e820 map. */
	extern struct e820map machine_e820;

	if (!is_initial_xendomain())
		return 0;
	for (i = 0; i < machine_e820.nr_map; i++) {
		const struct e820entry *ei = &machine_e820.map[i];
#endif

		if (type && ei->type != type)
			continue;
		/* is the region (part) in overlap with the current region ?*/
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/* if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/* if start is now at or beyond end, we're done, full coverage */
		if (start >= end)
			return 1; /* we're done */
	}
	return 0;
}
/*
 * Find a free area in a specific range.
 *
 * Scans RAM entries of the e820 map for `size` bytes inside [start, end),
 * skipping regions that bad_addr() rejects.  Returns the physical address
 * found, or -1UL if no suitable area exists.
 */
unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long addr = ei->addr, last;

		if (ei->type != E820_RAM)
			continue;
		/* clamp the candidate to the requested window */
		if (addr < start)
			addr = start;
		if (addr > ei->addr + ei->size)
			continue;
		/* bad_addr() bumps addr forward; retry until it fits or we run out */
		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
			;
		last = addr + size;
		if (last > ei->addr + ei->size)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1UL;
}
/*
 * Free bootmem based on the e820 table for a node.
 *
 * Releases to the bootmem allocator every page-aligned chunk of RAM the
 * e820 map reports inside [start, end) for the given node.
 */
void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		/* only RAM entries that overlap [start, end) */
		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;

		/* shrink to whole pages within the requested window */
		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;

		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;

		if (last > addr && last-addr >= PAGE_SIZE)
			free_bootmem_node(pgdat, addr, last-addr);
	}
}
/*
 * Find the highest page frame number we have available.
 *
 * Returns the last RAM pfn (clipped by the mem= limit and by MAXMEM) and,
 * as a side effect, updates the global end_pfn_map to cover all e820
 * entries (RAM or not) so the direct mapping can reach them.
 */
unsigned long __init e820_end_of_ram(void)
{
	int i;
	/* NOTE: deliberately shadows the global end_pfn; the caller stores
	   the return value into the global. */
	unsigned long end_pfn = 0;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start, end;

		/* count only whole pages inside the entry */
		start = round_up(ei->addr, PAGE_SIZE);
		end = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (start >= end)
			continue;
		if (ei->type == E820_RAM) {
			if (end > end_pfn<<PAGE_SHIFT)
				end_pfn = end>>PAGE_SHIFT;
		} else {
			if (end > end_pfn_map<<PAGE_SHIFT)
				end_pfn_map = end>>PAGE_SHIFT;
		}
	}

	/* keep the invariants: end_pfn <= end_pfn_map <= MAXMEM pfns */
	if (end_pfn > end_pfn_map)
		end_pfn_map = end_pfn;
	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
		end_pfn_map = MAXMEM>>PAGE_SHIFT;
	if (end_pfn > end_user_pfn)
		end_pfn = end_user_pfn;
	if (end_pfn > end_pfn_map)
		end_pfn = end_pfn_map;

	return end_pfn;
}
/*
 * Compute how much memory is missing in a range.
 * Unlike the other functions in this file the arguments are in page numbers.
 *
 * Returns (size of range) - (RAM pages found in the e820 map), in pages.
 */
unsigned long __init
e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long ram = 0;
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end = end_pfn << PAGE_SHIFT;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		/* only RAM entries that overlap [start, end) */
		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;

		/* clip the entry to whole pages within the window */
		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;

		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;

		if (last > addr)
			ram += last - addr;
	}
	return ((end - start) - ram) >> PAGE_SHIFT;
}
/*
 * Mark e820 reserved areas as busy for the resource manager.
 *
 * Registers one iomem resource per e820 entry.  RAM regions additionally
 * receive the kernel code/data sub-resources (native only) and, when
 * kexec is configured, the crash-kernel reservation.
 */
void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
{
	int i;
	for (i = 0; i < nr_map; i++) {
		struct resource *res;
		/* alloc_bootmem_low panics on failure, so no NULL check here */
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820[i].addr;
		res->end = res->start + e820[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
#ifndef CONFIG_XEN
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
#endif
#ifdef CONFIG_KEXEC
			if (crashk_res.start != crashk_res.end)
				request_resource(res, &crashk_res);
#ifdef CONFIG_XEN
			xen_machine_kexec_register_resources(res);
#endif
#endif
		}
	}
}
336 /*
337 * Add a memory region to the kernel e820 map.
338 */
339 void __init add_memory_region(unsigned long start, unsigned long size, int type)
340 {
341 int x = e820.nr_map;
343 if (x == E820MAX) {
344 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
345 return;
346 }
348 e820.map[x].addr = start;
349 e820.map[x].size = size;
350 e820.map[x].type = type;
351 e820.nr_map++;
352 }
354 void __init e820_print_map(char *who)
355 {
356 int i;
358 for (i = 0; i < e820.nr_map; i++) {
359 printk(" %s: %016Lx - %016Lx ", who,
360 (unsigned long long) e820.map[i].addr,
361 (unsigned long long) (e820.map[i].addr + e820.map[i].size));
362 switch (e820.map[i].type) {
363 case E820_RAM: printk("(usable)\n");
364 break;
365 case E820_RESERVED:
366 printk("(reserved)\n");
367 break;
368 case E820_ACPI:
369 printk("(ACPI data)\n");
370 break;
371 case E820_NVS:
372 printk("(ACPI NVS)\n");
373 break;
374 default: printk("type %u\n", e820.map[i].type);
375 break;
376 }
377 }
378 }
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * producing a sorted, non-overlapping map in place.  Where entries of
 * different types overlap, the higher (more restrictive) type wins.
 *
 * Returns 0 on success, -1 when the map is too small to need work or
 * contains an entry whose addr+size wraps around.
 */
static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	/* static __initdata: too large for the early boot stack */
	static struct change_member change_point_list[2*E820MAX] __initdata;
	static struct change_member *change_point[2*E820MAX] __initdata;
	static struct e820entry *overlap_list[E820MAX] __initdata;
	static struct e820entry new_bios[E820MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)...

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i=0; i<old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i=0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i=0; i < old_nr; i++)	{
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high)
	   -- simple bubble sort; the list is tiny (at most 2*E820MAX) */
	still_changing = 1;
	while (still_changing)	{
		still_changing = 0;
		for (i=1; i < chg_nr; i++)  {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
			   )
			{
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing=1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries=0;	 /* number of entries in the overlap table */
	new_bios_entry=0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */
	/* loop through change-points, determining affect on the new bios map */
	for (chgidx=0; chgidx < chg_nr; chgidx++)
	{
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
		{
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
		}
		else
		{
			/* remove entry from list (order independent, so swap with last) */
			for (i=0; i<overlap_entries; i++)
			{
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i=0; i<overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type)	{
			if (last_type != 0)	 {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= E820MAX)
						break; 	/* no more space left for new bios entries */
			}
			if (current_type != 0)	{
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr=change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;   /* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 *
 * Under Xen this also populates machine_e820: dom0 fetches the host
 * (machine) memory map via hypercall; other domains simply mirror
 * their own (pseudo-physical) map.
 *
 * Returns 0 on success, -1 when the map is rejected.
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
#ifndef CONFIG_XEN
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;
#else
	/* A Xen-provided map always has at least one entry. */
	BUG_ON(nr_map < 1);
#endif

	do {
		unsigned long start = biosmap->addr;
		unsigned long size = biosmap->size;
		unsigned long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

#ifndef CONFIG_XEN
		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 *
		 * This should be removed on Hammer which is supposed to not
		 * have non e820 covered ISA mappings there, but I had some strange
		 * problems so it stays for now.  -AK
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				/* keep the part below the 640k hole ... */
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				/* continue jumps to the while condition, which
				   still advances biosmap and decrements nr_map */
				if (end <= 0x100000ULL)
					continue;
				/* ... and the part above 1M */
				start = 0x100000ULL;
				size = end - start;
			}
		}
#endif

		add_memory_region(start, size, type);
	} while (biosmap++,--nr_map);

#ifdef CONFIG_XEN
	if (is_initial_xendomain()) {
		/* dom0: ask the hypervisor for the real machine map */
		struct xen_memory_map memmap;

		memmap.nr_entries = E820MAX;
		set_xen_guest_handle(memmap.buffer, machine_e820.map);

		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
			BUG();
		machine_e820.nr_map = memmap.nr_entries;
	} else
		/* other domains: machine map == pseudo-physical map */
		machine_e820 = e820;
#endif

	return 0;
}
#ifndef CONFIG_XEN
/*
 * Build the kernel e820 map from the BIOS-supplied data (native boot).
 * Falls back to the BIOS-88/e801 sizes when the e820 map is unusable.
 */
void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}
657 #else /* CONFIG_XEN */
659 void __init setup_memory_region(void)
660 {
661 int rc;
662 struct xen_memory_map memmap;
663 /*
664 * This is rather large for a stack variable but this early in
665 * the boot process we know we have plenty slack space.
666 */
667 struct e820entry map[E820MAX];
669 memmap.nr_entries = E820MAX;
670 set_xen_guest_handle(memmap.buffer, map);
672 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
673 if ( rc == -ENOSYS ) {
674 memmap.nr_entries = 1;
675 map[0].addr = 0ULL;
676 map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
677 /* 8MB slack (to balance backend allocations). */
678 map[0].size += 8 << 20;
679 map[0].type = E820_RAM;
680 rc = 0;
681 }
682 BUG_ON(rc);
684 sanitize_e820_map(map, (char *)&memmap.nr_entries);
686 BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
688 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
689 e820_print_map("Xen");
690 }
691 #endif
693 void __init parse_memopt(char *p, char **from)
694 {
695 int i;
696 unsigned long current_end;
697 unsigned long end;
699 end_user_pfn = memparse(p, from);
700 end_user_pfn >>= PAGE_SHIFT;
702 end = end_user_pfn<<PAGE_SHIFT;
703 i = e820.nr_map-1;
704 current_end = e820.map[i].addr + e820.map[i].size;
706 if (current_end < end) {
707 /*
708 * The e820 map ends before our requested size so
709 * extend the final entry to the requested address.
710 */
711 if (e820.map[i].type == E820_RAM)
712 e820.map[i].size = end - e820.map[i].addr;
713 else
714 add_memory_region(current_end, end - current_end, E820_RAM);
715 }
716 }
718 void __init parse_memmapopt(char *p, char **from)
719 {
720 unsigned long long start_at, mem_size;
722 mem_size = memparse(p, from);
723 p = *from;
724 if (*p == '@') {
725 start_at = memparse(p+1, from);
726 add_memory_region(start_at, mem_size, E820_RAM);
727 } else if (*p == '#') {
728 start_at = memparse(p+1, from);
729 add_memory_region(start_at, mem_size, E820_ACPI);
730 } else if (*p == '$') {
731 start_at = memparse(p+1, from);
732 add_memory_region(start_at, mem_size, E820_RESERVED);
733 } else {
734 end_user_pfn = (mem_size >> PAGE_SHIFT);
735 }
736 p = *from;
737 }
/* Start of the PCI MMIO allocation window; overwritten by e820_setup_gap().
   The initial value is a recognizable placeholder, not a real address. */
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);
/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space.  We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS let enough space left.
 *
 * Sets the global pci_mem_start to the rounded-up start of the gap.
 */
__init void e820_setup_gap(struct e820entry *e820, int nr_map)
{
	unsigned long gapstart, gapsize, round;
	unsigned long last;
	int i;
	int found = 0;

	/* walk the map top-down from the 4GB boundary */
	last = 0x100000000ull;
	gapstart = 0x10000000;
	gapsize = 0x400000;
	i = nr_map;
	while (--i >= 0) {
		unsigned long long start = e820[i].addr;
		unsigned long long end = start + e820[i].size;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap > gapsize) {
				gapsize = gap;
				gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}

	if (!found) {
		/* no gap below 4GB: place the window just above end of RAM */
		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
		/* the two KERN_ERR strings concatenate into one printk call */
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
	}

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
		pci_mem_start, gapstart, gapsize);
}