ia64/xen-unstable
changeset 4268:4a49af0cc9d9
bitkeeper revision 1.1236.1.113 (42407388qtYnzMaBpNXqBANV55c6Qw)
Manual merge.
| author | kaf24@firebug.cl.cam.ac.uk |
| --- | --- |
| date | Tue Mar 22 19:35:36 2005 +0000 (2005-03-22) |
| parents | 04400e772fd7 3868fc815598 |
| children | affc97a3b3e5 |
| files | xen/arch/x86/domain.c xen/common/page_alloc.c xen/common/schedule.c xen/include/xen/mm.h xen/include/xen/softirq.h |
```diff
--- a/xen/arch/x86/domain.c	Tue Mar 22 17:30:13 2005 +0000
+++ b/xen/arch/x86/domain.c	Tue Mar 22 19:35:36 2005 +0000
@@ -61,7 +61,10 @@ static __attribute_used__ void idle_loop
     {
         irq_stat[cpu].idle_timestamp = jiffies;
         while ( !softirq_pending(cpu) )
+        {
+            page_scrub_schedule_work();
             default_idle();
+        }
         do_softirq();
     }
 }
```
```diff
--- a/xen/common/page_alloc.c	Tue Mar 22 17:30:13 2005 +0000
+++ b/xen/common/page_alloc.c	Tue Mar 22 19:35:36 2005 +0000
@@ -29,6 +29,7 @@
 #include <xen/spinlock.h>
 #include <xen/slab.h>
 #include <xen/irq.h>
+#include <xen/softirq.h>
 #include <asm/domain_page.h>
 #include <asm/page.h>
 
@@ -547,7 +548,6 @@ void free_domheap_pages(struct pfn_info
     int i, drop_dom_ref;
     struct domain *d = page_get_owner(pg);
     struct exec_domain *ed;
-    void *p;
     int cpu_mask = 0;
 
     ASSERT(!in_irq());
@@ -579,18 +579,6 @@ void free_domheap_pages(struct pfn_info
             pg[i].tlbflush_timestamp = tlbflush_current_time();
             pg[i].u.free.cpu_mask    = cpu_mask;
             list_del(&pg[i].list);
-
-            /*
-             * Normally we expect a domain to clear pages before freeing them,
-             * if it cares about the secrecy of their contents. However, after
-             * a domain has died we assume responsibility for erasure.
-             */
-            if ( unlikely(test_bit(DF_DYING, &d->d_flags)) )
-            {
-                p = map_domain_mem(page_to_phys(&pg[i]));
-                clear_page(p);
-                unmap_domain_mem(p);
-            }
         }
 
         d->tot_pages -= 1 << order;
@@ -598,7 +586,24 @@ void free_domheap_pages(struct pfn_info
 
         spin_unlock_recursive(&d->page_alloc_lock);
 
-        free_heap_pages(MEMZONE_DOM, pg, order);
+        if ( likely(!test_bit(DF_DYING, &d->d_flags)) )
+        {
+            free_heap_pages(MEMZONE_DOM, pg, order);
+        }
+        else
+        {
+            /*
+             * Normally we expect a domain to clear pages before freeing them,
+             * if it cares about the secrecy of their contents. However, after
+             * a domain has died we assume responsibility for erasure.
+             */
+            for ( i = 0; i < (1 << order); i++ )
+            {
+                spin_lock(&page_scrub_lock);
+                list_add(&pg[i].list, &page_scrub_list);
+                spin_unlock(&page_scrub_lock);
+            }
+        }
     }
     else
     {
@@ -617,6 +622,66 @@ unsigned long avail_domheap_pages(void)
     return avail[MEMZONE_DOM];
 }
 
+
+
+/*************************
+ * PAGE SCRUBBING
+ */
+
+spinlock_t page_scrub_lock;
+struct list_head page_scrub_list;
+
+static void page_scrub_softirq(void)
+{
+    struct list_head *ent;
+    struct pfn_info *pg;
+    void *p;
+    int i;
+    s_time_t start = NOW();
+
+    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+    do {
+        spin_lock(&page_scrub_lock);
+
+        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+        {
+            spin_unlock(&page_scrub_lock);
+            return;
+        }
+
+        /* Peel up to 16 pages from the list. */
+        for ( i = 0; i < 16; i++ )
+            if ( (ent = ent->next) == &page_scrub_list )
+                break;
+
+        /* Remove peeled pages from the list. */
+        ent->next->prev = &page_scrub_list;
+        page_scrub_list.next = ent->next;
+
+        spin_unlock(&page_scrub_lock);
+
+        /* Working backwards, scrub each page in turn. */
+        while ( ent != &page_scrub_list )
+        {
+            pg = list_entry(ent, struct pfn_info, list);
+            ent = ent->prev;
+            p = map_domain_mem(page_to_phys(pg));
+            clear_page(p);
+            unmap_domain_mem(p);
+            free_heap_pages(MEMZONE_DOM, pg, 0);
+        }
+    } while ( (NOW() - start) < MILLISECS(1) );
+}
+
+static __init int page_scrub_init(void)
+{
+    spin_lock_init(&page_scrub_lock);
+    INIT_LIST_HEAD(&page_scrub_list);
+    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+    return 0;
+}
+__initcall(page_scrub_init);
+
 /*
  * Local variables:
  * mode: C
```
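The new `page_scrub_softirq()` is a time-budgeted consumer: it takes the lock only long enough to peel up to 16 pages off the shared list, scrubs and frees them with the lock dropped, and repeats until roughly 1ms has elapsed, so the handler never monopolizes a CPU. Below is a minimal standalone sketch of that pattern; a pthread mutex, a singly linked list, and `memset()` stand in for the Xen primitives, and all names (`defer_scrub`, `scrub_some`, `BATCH`, `BUDGET_NS`) are hypothetical, not the real interfaces.

```c
/* Sketch of the batched, time-budgeted drain used by page_scrub_softirq():
 * hold the lock only for list surgery, do the expensive clearing off-lock. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define PAGE_SIZE 4096
#define BATCH     16          /* pages peeled per lock hold */
#define BUDGET_NS 1000000L    /* ~1ms of work per invocation */

struct page { struct page *next; unsigned char data[PAGE_SIZE]; };

static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *scrub_list;   /* singly linked LIFO of dirty pages */

static long long now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Producer side: queue a dirty page for deferred scrubbing. */
static void defer_scrub(struct page *pg)
{
    pthread_mutex_lock(&scrub_lock);
    pg->next = scrub_list;
    scrub_list = pg;
    pthread_mutex_unlock(&scrub_lock);
}

/* Consumer side: drain in small batches until empty or out of budget. */
static void scrub_some(void)
{
    long long start = now_ns();
    do {
        struct page *batch = NULL, *pg;
        int i;

        /* Hold the lock only long enough to detach up to BATCH pages. */
        pthread_mutex_lock(&scrub_lock);
        for ( i = 0; i < BATCH && scrub_list != NULL; i++ )
        {
            pg = scrub_list;
            scrub_list = pg->next;
            pg->next = batch;
            batch = pg;
        }
        pthread_mutex_unlock(&scrub_lock);

        if ( batch == NULL )
            return;               /* nothing left to scrub */

        /* Clear each peeled page with the lock dropped. */
        while ( (pg = batch) != NULL )
        {
            batch = pg->next;
            memset(pg->data, 0, PAGE_SIZE);  /* stands in for clear_page */
            free(pg);                        /* stands in for free_heap_pages */
        }
    } while ( now_ns() - start < BUDGET_NS );
}

int main(void)
{
    for ( int i = 0; i < 1024; i++ )
        defer_scrub(calloc(1, sizeof(struct page)));
    scrub_some();                 /* one softirq invocation's worth of work */
    return 0;
}
```

Keeping the lock held only for list surgery means producers (here `defer_scrub()`, playing the role of `free_domheap_pages()` queueing a dying domain's pages) never stall behind the expensive page clears.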
```diff
--- a/xen/common/schedule.c	Tue Mar 22 17:30:13 2005 +0000
+++ b/xen/common/schedule.c	Tue Mar 22 19:35:36 2005 +0000
@@ -486,6 +486,8 @@ static void t_timer_fn(unsigned long unu
     if ( !is_idle_task(ed->domain) && update_dom_time(ed) )
         send_guest_virq(ed, VIRQ_TIMER);
 
+    page_scrub_schedule_work();
+
     t_timer[ed->processor].expires = NOW() + MILLISECS(10);
     add_ac_timer(&t_timer[ed->processor]);
 }
```
```diff
--- a/xen/include/xen/mm.h	Tue Mar 22 17:30:13 2005 +0000
+++ b/xen/include/xen/mm.h	Tue Mar 22 19:35:36 2005 +0000
@@ -2,6 +2,10 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
+#include <xen/config.h>
+#include <xen/list.h>
+#include <xen/spinlock.h>
+
 struct domain;
 struct pfn_info;
 
@@ -34,6 +38,15 @@ unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
 
+/* Automatic page scrubbing for dead domains. */
+extern spinlock_t page_scrub_lock;
+extern struct list_head page_scrub_list;
+#define page_scrub_schedule_work()              \
+    do {                                        \
+        if ( !list_empty(&page_scrub_list) )    \
+            raise_softirq(PAGE_SCRUB_SOFTIRQ);  \
+    } while ( 0 )
+
 #include <asm/mm.h>
 
 #endif /* __XEN_MM_H__ */
```
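Note that `page_scrub_schedule_work()` tests `list_empty()` without taking `page_scrub_lock`. A stale answer appears to be harmless by design: a spurious raise just means `page_scrub_softirq()` finds the list empty and returns, while a missed raise is retried on the next 10ms `t_timer_fn` tick or idle-loop pass, and the unlocked check keeps the hook cheap enough for those hot paths.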
```diff
--- a/xen/include/xen/softirq.h	Tue Mar 22 17:30:13 2005 +0000
+++ b/xen/include/xen/softirq.h	Tue Mar 22 19:35:36 2005 +0000
@@ -7,7 +7,8 @@
 #define NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ 2
 #define KEYPRESS_SOFTIRQ 3
 #define NMI_SOFTIRQ 4
-#define NR_SOFTIRQS 5
+#define PAGE_SCRUB_SOFTIRQ 5
+#define NR_SOFTIRQS 6
 
 #ifndef __ASSEMBLY__
```
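For context, the new `PAGE_SCRUB_SOFTIRQ` slot plugs into a simple vector scheme: `open_softirq()` registers a handler under a statically numbered slot, `raise_softirq()` marks it pending, and `do_softirq()` (called from the idle loop, among other places) runs whatever is pending. The sketch below is a single-CPU approximation for illustration only; in Xen the pending mask is per-CPU and manipulated with atomic operations.

```c
/* Minimal single-CPU sketch of the softirq dispatch scheme (illustrative;
 * not the real Xen implementation). */
#include <stdio.h>

#define NR_SOFTIRQS        6
#define PAGE_SCRUB_SOFTIRQ 5

typedef void (*softirq_handler)(void);

static softirq_handler handlers[NR_SOFTIRQS];
static unsigned int pending;   /* bit n set => softirq n wants to run */

static void open_softirq(int nr, softirq_handler h) { handlers[nr] = h; }
static void raise_softirq(int nr) { pending |= 1u << nr; }

/* Run every pending handler; handlers may re-raise themselves. */
static void do_softirq(void)
{
    while ( pending != 0 )
    {
        for ( int nr = 0; nr < NR_SOFTIRQS; nr++ )
        {
            if ( pending & (1u << nr) )
            {
                pending &= ~(1u << nr);
                handlers[nr]();
            }
        }
    }
}

static void page_scrub_softirq(void) { puts("scrubbing a batch of pages"); }

int main(void)
{
    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
    raise_softirq(PAGE_SCRUB_SOFTIRQ);   /* what page_scrub_schedule_work does */
    do_softirq();                        /* dispatched from the idle loop */
    return 0;
}
```

Because slots are statically numbered, adding a softirq means claiming the next free index and bumping `NR_SOFTIRQS`, exactly as the hunk above does.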