direct-io.hg
changeset 7573:8ded2066e16c
Fix PAE shadow on a machine with RAM above 4G on x86_64 xen.
Currently, on a machine with RAM above 4G, we cannot run a 32-bit VMX
guest on x86_64 xen. The root cause is that the PAE page table PDPT must
be below 4G; this patch fixes the issue.
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Mon Oct 31 17:08:54 2005 +0100 (2005-10-31) |
parents | ea6d9f29dff5 |
children | 60c941136be9 |
files | xen/arch/x86/shadow.c |
line diff
```diff
--- a/xen/arch/x86/shadow.c	Mon Oct 31 17:07:14 2005 +0100
+++ b/xen/arch/x86/shadow.c	Mon Oct 31 17:08:54 2005 +0100
@@ -228,18 +228,20 @@ alloc_shadow_page(struct domain *d,
              */
             page = alloc_domheap_pages(NULL, SL1_ORDER, 0);
             if (!page)
-                domain_crash_synchronous();
+                goto no_shadow_page;
 
             void *l1_0 = map_domain_page(page_to_pfn(page));
-            memset(l1_0,0,PAGE_SIZE);
+            memset(l1_0, 0, PAGE_SIZE);
             unmap_domain_page(l1_0);
+
             void *l1_1 = map_domain_page(page_to_pfn(page+1));
-            memset(l1_1,0,PAGE_SIZE);
+            memset(l1_1, 0, PAGE_SIZE);
             unmap_domain_page(l1_1);
 #else
             page = alloc_domheap_page(NULL);
             if (!page)
-                domain_crash_synchronous();
+                goto no_shadow_page;
+
             void *l1 = map_domain_page(page_to_pfn(page));
             memset(l1, 0, PAGE_SIZE);
             unmap_domain_page(l1);
@@ -248,6 +250,9 @@ alloc_shadow_page(struct domain *d,
         else
         {
             page = alloc_domheap_page(NULL);
+            if (!page)
+                goto no_shadow_page;
+
             void *l1 = map_domain_page(page_to_pfn(page));
             memset(l1, 0, PAGE_SIZE);
             unmap_domain_page(l1);
@@ -255,22 +260,26 @@ alloc_shadow_page(struct domain *d,
         }
     }
     else {
+#if CONFIG_PAGING_LEVELS == 2
         page = alloc_domheap_page(NULL);
+#elif CONFIG_PAGING_LEVELS == 3
+        if ( psh_type == PGT_l3_shadow )
+            page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
+        else
+            page = alloc_domheap_page(NULL);
+#elif CONFIG_PAGING_LEVELS == 4
+        if ( (psh_type == PGT_l4_shadow) &&
+             (d->arch.ops->guest_paging_levels != PAGING_L4) )
+            page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
+        else
+            page = alloc_domheap_page(NULL);
+#endif
+        if (!page)
+            goto no_shadow_page;
+
         void *lp = map_domain_page(page_to_pfn(page));
         memset(lp, 0, PAGE_SIZE);
         unmap_domain_page(lp);
-
-    }
-    if ( unlikely(page == NULL) )
-    {
-        printk("Couldn't alloc shadow page! dom%d count=%d\n",
-               d->domain_id, d->arch.shadow_page_count);
-        printk("Shadow table counts: l1=%d l2=%d hl2=%d snapshot=%d\n",
-               perfc_value(shadow_l1_pages),
-               perfc_value(shadow_l2_pages),
-               perfc_value(hl2_table_pages),
-               perfc_value(snapshot_pages));
-        BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
     }
 
     smfn = page_to_pfn(page);
@@ -359,7 +368,7 @@ alloc_shadow_page(struct domain *d,
 
     return smfn;
 
-  fail:
+fail:
     FSH_LOG("promotion of pfn=%lx mfn=%lx failed! external gnttab refs?",
             gpfn, gmfn);
     if (psh_type == PGT_l1_shadow)
@@ -377,6 +386,20 @@ alloc_shadow_page(struct domain *d,
     }
     else
         free_domheap_page(page);
+
+    return 0;
+
+no_shadow_page:
+    ASSERT(page == NULL);
+    printk("Couldn't alloc shadow page! dom%d count=%d\n",
+           d->domain_id, d->arch.shadow_page_count);
+    printk("Shadow table counts: l1=%d l2=%d hl2=%d snapshot=%d\n",
+           perfc_value(shadow_l1_pages),
+           perfc_value(shadow_l2_pages),
+           perfc_value(hl2_table_pages),
+           perfc_value(snapshot_pages));
+    BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
+
     return 0;
 }
```
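The functional core of the change is the allocation policy in the third hunk: the shadow of a PAE guest's top-level table (the PDPT, shadowed as an L3 on 3-level builds, or as an L4 when a non-4-level guest runs on x86_64 Xen) is now taken from the DMA pool so its machine address stays below 4G, while every other shadow page keeps using the ordinary domain heap. The sketch below restates that decision in isolation; `choose_shadow_heap()` is a hypothetical helper used only for illustration and does not exist in xen/arch/x86/shadow.c, where the real logic is inline in `alloc_shadow_page()`.

```c
/*
 * Illustrative sketch only: isolates the "which heap does this shadow
 * page come from?" decision introduced by this changeset. Not a
 * function in the Xen source tree.
 */
static struct pfn_info *choose_shadow_heap(struct domain *d, u32 psh_type)
{
#if CONFIG_PAGING_LEVELS == 3
    /* The shadow of a PAE guest's PDPT (an L3 shadow) is what VMX ends
     * up using as the top-level table, and it must lie below 4G, so
     * allocate it from the DMA pool. */
    if ( psh_type == PGT_l3_shadow )
        return alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
#elif CONFIG_PAGING_LEVELS == 4
    /* The same constraint applies on x86_64 Xen when the guest itself
     * is not using 4-level paging (i.e. a 32-bit PAE guest). */
    if ( (psh_type == PGT_l4_shadow) &&
         (d->arch.ops->guest_paging_levels != PAGING_L4) )
        return alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
#endif
    /* All other shadow pages can live anywhere in the domain heap. */
    return alloc_domheap_page(NULL);
}
```

The rest of the diff is mechanical: every allocation site now checks for NULL and jumps to the new no_shadow_page: label, replacing both the old domain_crash_synchronous() calls and the inline unlikely(page == NULL) block, so the out-of-memory diagnostics are emitted from a single place.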