ia64/xen-unstable
changeset 17787:24c86abbb387
x86 hvm: Improve paging performance for 64b solaris guests
The following patch provides a 'fast-path' for sh_remove_write_access()
for 64-bit Solaris HVM guests. This yields a significant performance
boost for such guests; our testing shows a 200-400% improvement in
microbenchmarks such as fork(), exit(), etc.
From: Gary Pennington <Gary.Pennington@sun.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Jun 05 10:36:19 2008 +0100 (2008-06-05) |
parents | 02132fc864b4 |
children | 129a511d31ee |
files | xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/perfc_defn.h |
line diff
```diff
--- a/xen/arch/x86/mm/shadow/common.c	Thu Jun 05 10:34:01 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Thu Jun 05 10:36:19 2008 +0100
@@ -1738,6 +1738,11 @@ int sh_remove_write_access(struct vcpu *
             gfn = mfn_to_gfn(v->domain, gmfn);
             GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4);
             GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4);
+            /*
+             * 64bit Solaris kernel page map at
+             * kpm_vbase; 0xfffffe0000000000UL
+             */
+            GUESS(0xfffffe0000000000UL + (gfn << PAGE_SHIFT), 4);
         }
 #endif /* CONFIG_PAGING_LEVELS >= 4 */
 
```
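For context: each GUESS() above tries one candidate guest virtual address and, if the guest really does map the frame writably there, removes the write permission via the shadow at that address; the second argument selects which shadow_writeable_h_* performance counter (from perfc_defn.h) gets credited, which is why this changeset also relabels counter 4. The heuristic works because 64-bit kernels keep a linear "direct map" of physical memory at a fixed virtual base, so a guest frame number yields exactly one candidate address per known layout. Below is a minimal standalone sketch of just that address computation, assuming 4 KiB pages; the gfn value and the layout labels in the comments are my reading of the surrounding code, not part of the patch:

```c
/*
 * Standalone sketch (not Xen code): the address computation behind the
 * GUESS() heuristic. The base constants are taken from the hunk above;
 * the gfn value and layout labels are illustrative assumptions.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assuming 4 KiB pages */

/* Fixed direct-map bases probed for 4-level guests, one candidate
 * per known 64-bit kernel layout. */
static const uint64_t direct_map_bases[] = {
    0xffff810000000000UL, /* 64b Linux direct map */
    0x0000010000000000UL, /* older 64b Linux direct map */
    0xfffffe0000000000UL, /* 64b Solaris kpm_vbase (added by this patch) */
};

int main(void)
{
    uint64_t gfn = 0x1234; /* example guest frame number */

    /* Each base yields exactly one candidate VA for this frame; only
     * these addresses need checking for a leftover writable mapping. */
    for (size_t i = 0; i < sizeof(direct_map_bases) / sizeof(direct_map_bases[0]); i++)
        printf("candidate VA: 0x%016" PRIx64 "\n",
               direct_map_bases[i] + (gfn << PAGE_SHIFT));
    return 0;
}
```

If none of the guesses hits, the shadow code falls back to the brute-force scan of all shadow L1 tables, which is what the shadow_writeable_bf counter below tracks; avoiding that scan is where the reported speedup comes from.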
```diff
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Jun 05 10:34:01 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Jun 05 10:36:19 2008 +0100
@@ -4007,7 +4007,9 @@ int sh_rm_write_access_from_l1(struct vc
     shadow_l1e_t *sl1e;
     int done = 0;
     int flags;
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
+#endif
 
     SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
     {
```
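This hunk is purely a build fix: base_sl1mfn is consumed, further down in this function, only inside a SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC block, so without the matching guard on its declaration a build with the heuristic compiled out would presumably warn about an unused variable (fatal when building with -Werror, as Xen's build typically does). A generic sketch of the pattern, with invented names:

```c
/*
 * Generic illustration (not Xen code, names invented): guard a variable
 * whose only consumer sits under the same compile-time option, so builds
 * with the option disabled do not trip -Wunused-variable.
 */
#include <stdio.h>

#define FEATURE 0 /* stand-in for SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */

static int process(int value)
{
#if FEATURE
    int original = value; /* only referenced when FEATURE is enabled */
#endif
    value *= 2;
#if FEATURE
    value += original; /* the sole consumer, under the same guard */
#endif
    return value;
}

int main(void)
{
    printf("%d\n", process(21));
    return 0;
}
```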
```diff
--- a/xen/include/asm-x86/perfc_defn.h	Thu Jun 05 10:34:01 2008 +0100
+++ b/xen/include/asm-x86/perfc_defn.h	Thu Jun 05 10:36:19 2008 +0100
@@ -77,8 +77,7 @@ PERFCOUNTER(shadow_writeable,      "shad
 PERFCOUNTER(shadow_writeable_h_1,  "shadow writeable: 32b w2k3")
 PERFCOUNTER(shadow_writeable_h_2,  "shadow writeable: 32pae w2k3")
 PERFCOUNTER(shadow_writeable_h_3,  "shadow writeable: 64b w2k3")
-PERFCOUNTER(shadow_writeable_h_4,  "shadow writeable: 32b linux low")
-PERFCOUNTER(shadow_writeable_h_5,  "shadow writeable: 32b linux high")
+PERFCOUNTER(shadow_writeable_h_4,  "shadow writeable: linux/solaris")
 PERFCOUNTER(shadow_writeable_bf,   "shadow writeable brute-force")
 PERFCOUNTER(shadow_mappings,       "shadow removes all mappings")
 PERFCOUNTER(shadow_mappings_bf,    "shadow rm-mappings brute-force")
```