direct-io.hg
changeset 4159:e379e05dfb91
bitkeeper revision 1.1246 (423705104iQz7lnTkmHF1xmS_A1bNg)
Michael's hacked shadow mode linux
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author | rneugeba@wyvis.research.intel-research.net |
---|---|
date | Tue Mar 15 15:53:52 2005 +0000 (2005-03-15) |
parents | 2c4ca5aad6c4 |
children | f3a1163f9d2b |
files | linux-2.6.10-xen-sparse/arch/xen/Kconfig linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c |
line diff
--- a/linux-2.6.10-xen-sparse/arch/xen/Kconfig	Tue Mar 15 14:50:10 2005 +0000
+++ b/linux-2.6.10-xen-sparse/arch/xen/Kconfig	Tue Mar 15 15:53:52 2005 +0000
@@ -146,6 +146,12 @@ config XEN_BATCH_MODE2
 	  we only use this for benchmarking
 	  enable only if you know what you are doing
 
+config XEN_SHADOW_MODE
+	bool "Fake shadow mode"
+	default n
+	help
+	  fakes out a shadow mode kernel
+
 
 config XEN_SCRUB_PAGES
 	bool "Scrub memory before freeing it to Xen"
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	Tue Mar 15 14:50:10 2005 +0000
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	Tue Mar 15 15:53:52 2005 +0000
@@ -125,6 +125,7 @@ static inline void increment_index_and_f
 
 void queue_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -137,10 +138,15 @@ void queue_l1_entry_update(pte_t *ptr, u
     __flush_page_update_queue();
 #endif
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -150,6 +156,10 @@ void queue_l2_entry_update(pmd_t *ptr, u
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void queue_pt_switch(unsigned long ptr)
@@ -278,6 +288,7 @@ void queue_machphys_update(unsigned long
 /* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -287,10 +298,14 @@ void xen_l1_entry_update(pte_t *ptr, uns
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -300,6 +315,9 @@ void xen_l2_entry_update(pmd_t *ptr, uns
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void xen_pt_switch(unsigned long ptr)
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	Tue Mar 15 14:50:10 2005 +0000
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	Tue Mar 15 15:53:52 2005 +0000
@@ -77,7 +77,9 @@ static pte_t * __init one_page_table_ini
 {
 	if (pmd_none(*pmd)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+#ifndef CONFIG_XEN_SHADOW_MODE
 		make_page_readonly(page_table);
+#endif
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		if (page_table != pte_offset_kernel(pmd, 0))
 			BUG();
@@ -349,7 +351,9 @@ static void __init pagetable_init (void)
 	 * it. We clean up by write-enabling and then freeing the old page dir.
 	 */
 	memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
+#ifndef CONFIG_XEN_SHADOW_MODE
 	make_page_readonly(new_pgd);
+#endif
 	queue_pgd_pin(__pa(new_pgd));
 	load_cr3(new_pgd);
 	queue_pgd_unpin(__pa(old_pgd));
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	Tue Mar 15 14:50:10 2005 +0000
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	Tue Mar 15 15:53:52 2005 +0000
@@ -181,7 +181,9 @@ pte_t *pte_alloc_one_kernel(struct mm_st
 	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 	if (pte) {
 		clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
 		make_page_readonly(pte);
+#endif
 		xen_flush_page_update_queue();
 	}
 	return pte;
@@ -194,7 +196,9 @@ void pte_ctor(void *pte, kmem_cache_t *c
 	set_page_count(page, 1);
 
 	clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
 	make_page_readonly(pte);
+#endif
 	queue_pte_pin(__pa(pte));
 	flush_page_update_queue();
 }
@@ -304,7 +308,9 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
 	spin_unlock_irqrestore(&pgd_lock, flags);
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 out:
+#ifndef CONFIG_XEN_SHADOW_MODE
 	make_page_readonly(pgd);
+#endif
 	queue_pgd_pin(__pa(pgd));
 	flush_page_update_queue();
 }
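The whole changeset follows one pattern: when CONFIG_XEN_SHADOW_MODE is selected, each page-table update path writes the entry directly instead of queueing an MMU-update request for Xen, and the make_page_readonly() calls that normally protect page tables before pinning are compiled out. Below is a minimal, self-contained sketch of that pattern, not code from the tree: l1_entry_update() and queue_update() are hypothetical stand-ins for the real queueing and write paths, and the real kernel additionally takes update_lock and batches requests.

/*
 * Sketch of the CONFIG_XEN_SHADOW_MODE pattern applied in this changeset.
 * Assumptions: queue_update() stands in for the real per-CPU update queue
 * (which batches MMU_NORMAL_PT_UPDATE requests for Xen); in shadow mode the
 * guest simply writes its own page-table entry.
 */
#include <stdio.h>

/* Selected via Kconfig in the real tree; uncomment to take the direct path. */
/* #define CONFIG_XEN_SHADOW_MODE 1 */

typedef unsigned long pte_val_t;

static void queue_update(pte_val_t *ptr, pte_val_t val)
{
	/* Stand-in for enqueueing a page-table update for the hypervisor. */
	printf("queued update of %p to %#lx\n", (void *)ptr, val);
	*ptr = val;	/* stand-in for Xen applying the queued request */
}

static void l1_entry_update(pte_val_t *ptr, pte_val_t val)
{
#ifndef CONFIG_XEN_SHADOW_MODE
	/* Normal paravirtual path: page tables are read-only and pinned,
	 * so every update must go through the hypervisor. */
	queue_update(ptr, val);
#else
	/* Fake shadow mode: write the guest's own table directly and rely
	 * on the hypervisor to shadow/validate it. */
	*ptr = val;
#endif
}

int main(void)
{
	pte_val_t pte = 0;

	l1_entry_update(&pte, 0x1000 | 0x1);	/* frame 0x1000, present bit */
	printf("pte = %#lx\n", pte);
	return 0;
}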