ia64/xen-unstable
changeset 365:bee5c5831c41
bitkeeper revision 1.171 (3e9d272dy0ZnTPeYx-n2Qx8CXJLFHw)
sched.h, memory.c, traps.c:
Allow paging out of current LDT pages. Also: flush the shadow LDT mappings on a pagetable switch.
| author | kaf24@scramble.cl.cam.ac.uk |
| --- | --- |
| date | Wed Apr 16 09:49:33 2003 +0000 (2003-04-16) |
| parents | 942eb9bcae13 |
| children | 87768b015863 |
| files | xen/arch/i386/traps.c xen/common/memory.c xen/include/xeno/sched.h |
line diff
--- a/xen/arch/i386/traps.c	Tue Apr 15 23:19:11 2003 +0000
+++ b/xen/arch/i386/traps.c	Wed Apr 16 09:49:33 2003 +0000
@@ -259,14 +259,14 @@ asmlinkage void do_page_fault(struct pt_
 
     __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
 
-    if ( unlikely(!(regs->xcs & 3)) )
-        goto fault_in_hypervisor;
-
     if ( unlikely(addr > PAGE_OFFSET) )
         goto fault_in_xen_space;
 
 bounce_fault:
 
+    if ( unlikely(!(regs->xcs & 3)) )
+        goto fault_in_hypervisor;
+
     ti = p->thread.traps + 14;
     gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
     gtb->cr2 = addr;
@@ -275,7 +275,12 @@ asmlinkage void do_page_fault(struct pt_
     gtb->eip = ti->address;
     return;
 
-
+    /*
+     * FAULT IN XEN ADDRESS SPACE:
+     * We only deal with one kind -- a fault in the shadow LDT mapping.
+     * If this occurs we pull a mapping from the guest's LDT, if it is
+     * valid. Otherwise we send the fault up to the guest OS to be handled.
+     */
 fault_in_xen_space:
 
     if ( (addr < LDT_VIRT_START) ||
@@ -316,9 +321,11 @@ asmlinkage void do_page_fault(struct pt_
         page->flags |= PGT_ldt_page;
     }
 
+    /* Success! */
     get_page_type(page);
     get_page_tot(page);
     p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
+    p->mm.shadow_ldt_mapcnt++;
 
     spin_unlock(&p->page_lock);
     return;
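The hunk above is the demand-mapping half of the scheme: a fault inside the shadow LDT region is satisfied by borrowing the page that backs the guest's own LDT, re-typing it as PGT_ldt_page, installing it in the per-domain page table, and counting the mapping in shadow_ldt_mapcnt. The toy program below is a minimal sketch of that bookkeeping only; every type and helper in it (toy_page, toy_mm, guest_ldt_lookup, shadow_ldt_fault) is a stand-in invented for illustration, not part of Xen.

```c
/*
 * Toy model of demand-mapping one shadow LDT slot on a page fault.
 * All names here (toy_page, toy_mm, guest_ldt_lookup, ...) are invented
 * stand-ins for illustration; they are not the Xen 1.x structures.
 */
#include <stdio.h>
#include <stddef.h>

#define SHADOW_SLOTS 16                 /* mirrors perdomain_pt slots 16..31 */

struct toy_page {
    int type_count;                     /* analogue of page->type_count */
    int tot_count;                      /* analogue of page->tot_count  */
    int is_ldt;                         /* analogue of PGT_ldt_page     */
};

struct toy_mm {
    struct toy_page *shadow[SHADOW_SLOTS];  /* analogue of perdomain_pt[16..31] */
    unsigned long shadow_ldt_mapcnt;        /* the counter this changeset adds  */
};

/* Hypothetical lookup: which guest page backs LDT slot 'slot'? */
static struct toy_page *guest_ldt_lookup(struct toy_page *ldt_pages,
                                         int nr_pages, int slot)
{
    return (slot >= 0 && slot < nr_pages) ? &ldt_pages[slot] : NULL;
}

/* Analogue of the fault path: pull one mapping in and count it. */
static int shadow_ldt_fault(struct toy_mm *mm, struct toy_page *ldt_pages,
                            int nr_pages, int slot)
{
    struct toy_page *page = guest_ldt_lookup(ldt_pages, nr_pages, slot);
    if ( page == NULL )
        return -1;                      /* bounce the fault to the guest OS */
    page->is_ldt = 1;                   /* re-type as an LDT page           */
    page->type_count++;                 /* get_page_type()                  */
    page->tot_count++;                  /* get_page_tot()                   */
    mm->shadow[slot] = page;            /* install the shadow mapping       */
    mm->shadow_ldt_mapcnt++;            /* remember there is work to undo   */
    return 0;
}

int main(void)
{
    struct toy_page ldt_pages[2] = {{0}};
    struct toy_mm mm = {{NULL}, 0};
    shadow_ldt_fault(&mm, ldt_pages, 2, 0);
    printf("mapcnt=%lu type_count=%d\n",
           mm.shadow_ldt_mapcnt, ldt_pages[0].type_count);
    return 0;
}
```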
--- a/xen/common/memory.c	Tue Apr 15 23:19:11 2003 +0000
+++ b/xen/common/memory.c	Wed Apr 16 09:49:33 2003 +0000
@@ -209,7 +209,9 @@ struct list_head free_list;
 spinlock_t free_list_lock = SPIN_LOCK_UNLOCKED;
 unsigned int free_pfns;
 
-static int tlb_flush[NR_CPUS];
+/* Used to defer flushing of memory structures. */
+static int flush_tlb[NR_CPUS] __cacheline_aligned;
+
 
 /*
  * init_frametable:
@@ -222,7 +224,7 @@ void __init init_frametable(unsigned lon
     unsigned long page_index;
     unsigned long flags;
 
-    memset(tlb_flush, 0, sizeof(tlb_flush));
+    memset(flush_tlb, 0, sizeof(flush_tlb));
 
     max_page = nr_pages;
     frame_table_size = nr_pages * sizeof(struct pfn_info);
@@ -247,6 +249,34 @@ void __init init_frametable(unsigned lon
 }
 
 
+static void __invalidate_shadow_ldt(void)
+{
+    int i;
+    unsigned long pfn;
+    struct pfn_info *page;
+
+    current->mm.shadow_ldt_mapcnt = 0;
+
+    for ( i = 16; i < 32; i++ )
+    {
+        pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
+        if ( pfn == 0 ) continue;
+        current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
+        page = frame_table + pfn;
+        ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
+        ASSERT((page->flags & PG_domain_mask) == current->domain);
+        ASSERT((page->type_count != 0) && (page->tot_count != 0));
+        put_page_type(page);
+        put_page_tot(page);
+    }
+}
+static inline void invalidate_shadow_ldt(void)
+{
+    if ( current->mm.shadow_ldt_mapcnt != 0 )
+        __invalidate_shadow_ldt();
+}
+
+
 /* Return original refcnt, or -1 on error. */
 static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
 {
@@ -283,6 +313,7 @@ static int inc_page_refcnt(unsigned long
     return get_page_type(page);
 }
 
+
 /* Return new refcnt, or -1 on error. */
 static int dec_page_refcnt(unsigned long page_nr, unsigned int type)
 {
@@ -373,6 +404,7 @@ static int get_l2_table(unsigned long pa
     return ret;
 }
 
+
 static int get_l1_table(unsigned long page_nr)
 {
     l1_pgentry_t *p_l1_entry, l1_entry;
@@ -408,6 +440,7 @@ static int get_l1_table(unsigned long pa
     return ret;
 }
 
+
 static int get_page(unsigned long page_nr, int writeable)
 {
     struct pfn_info *page;
@@ -450,6 +483,7 @@ static int get_page(unsigned long page_n
     return(0);
 }
 
+
 static void put_l2_table(unsigned long page_nr)
 {
     l2_pgentry_t *p_l2_entry, l2_entry;
@@ -469,6 +503,7 @@ static void put_l2_table(unsigned long p
     unmap_domain_mem(p_l2_entry);
 }
 
+
 static void put_l1_table(unsigned long page_nr)
 {
     l1_pgentry_t *p_l1_entry, l1_entry;
@@ -492,6 +527,7 @@ static void put_l1_table(unsigned long p
     unmap_domain_mem(p_l1_entry-1);
 }
 
+
 static void put_page(unsigned long page_nr, int writeable)
 {
     struct pfn_info *page;
@@ -502,10 +538,19 @@ static void put_page(unsigned long page_
            ((page_type_count(page) != 0) &&
             ((page->flags & PG_type_mask) == PGT_writeable_page) &&
             ((page->flags & PG_need_flush) == PG_need_flush)));
-    if ( writeable && (put_page_type(page) == 0) )
+    if ( writeable )
     {
-        tlb_flush[smp_processor_id()] = 1;
-        page->flags &= ~PG_need_flush;
+        if ( put_page_type(page) == 0 )
+        {
+            flush_tlb[smp_processor_id()] = 1;
+            page->flags &= ~PG_need_flush;
+        }
+    }
+    else if ( unlikely(((page->flags & PG_type_mask) == PGT_ldt_page) &&
+                       (page_type_count(page) != 0)) )
+    {
+        /* We expect this is rare so we just blow the entire shadow LDT. */
+        invalidate_shadow_ldt();
     }
     put_page_tot(page);
 }
@@ -685,15 +730,17 @@ static int do_extended_command(unsigned
         {
             put_l2_table(pagetable_val(current->mm.pagetable) >> PAGE_SHIFT);
             current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+            invalidate_shadow_ldt();
+            flush_tlb[smp_processor_id()] = 1;
         }
         else
        {
            MEM_LOG("Error while installing new baseptr %08lx %d", ptr, err);
        }
-        /* fall through */
+        break;
 
     case PGEXT_TLB_FLUSH:
-        tlb_flush[smp_processor_id()] = 1;
+        flush_tlb[smp_processor_id()] = 1;
         break;
 
     case PGEXT_INVLPG:
@@ -702,7 +749,6 @@ static int do_extended_command(unsigned
 
     case PGEXT_SET_LDT:
     {
-        int i;
         unsigned long ents = val >> PGEXT_CMD_SHIFT;
         if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
              (ents > 8192) ||
@@ -717,20 +763,8 @@ static int do_extended_command(unsigned
         {
             if ( current->mm.ldt_ents != 0 )
             {
-                /* Tear down the old LDT. */
-                for ( i = 16; i < 32; i++ )
-                {
-                    pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
-                    if ( pfn == 0 ) continue;
-                    current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
-                    page = frame_table + pfn;
-                    ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
-                    ASSERT((page->flags & PG_domain_mask) == current->domain);
-                    ASSERT((page->type_count != 0) && (page->tot_count != 0));
-                    put_page_type(page);
-                    put_page_tot(page);
-                }
-                tlb_flush[smp_processor_id()] = 1;
+                invalidate_shadow_ldt();
+                flush_tlb[smp_processor_id()] = 1;
             }
             current->mm.ldt_base = ptr;
             current->mm.ldt_ents = ents;
@@ -748,6 +782,7 @@ static int do_extended_command(unsigned
     return err;
 }
 
+
 int do_process_page_updates(page_update_request_t *ureqs, int count)
 {
     page_update_request_t req;
@@ -860,9 +895,9 @@ int do_process_page_updates(page_update_
         ureqs++;
     }
 
-    if ( tlb_flush[smp_processor_id()] )
+    if ( flush_tlb[smp_processor_id()] )
     {
-        tlb_flush[smp_processor_id()] = 0;
+        flush_tlb[smp_processor_id()] = 0;
         __write_cr3_counted(pagetable_val(current->mm.pagetable));
 
     }
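Two patterns in this file are worth noting. First, the teardown is split into an inline fast path that only tests shadow_ldt_mapcnt and an out-of-line slow path that walks the sixteen shadow slots and drops the reference counts. Second, TLB flushes are not issued eagerly: put_page(), the new-baseptr path and PGEXT_SET_LDT merely set the per-CPU flush_tlb flag, which do_process_page_updates() honours once at the end of a batch. Below is a self-contained sketch of both patterns, with invented names (flush_pending, shadow_slot, switch_pagetable, end_of_batch) rather than the Xen ones.

```c
/*
 * Toy illustration of (a) a counter-gated slow path and (b) a deferred,
 * per-CPU flush flag. All names are invented for this sketch.
 */
#include <stdio.h>

#define NR_CPUS      4
#define SHADOW_SLOTS 16

static int flush_pending[NR_CPUS];          /* analogue of flush_tlb[]          */
static void *shadow_slot[SHADOW_SLOTS];     /* analogue of perdomain_pt[16..31] */
static unsigned long shadow_mapcnt;         /* analogue of shadow_ldt_mapcnt    */

static int this_cpu(void) { return 0; }     /* stand-in for smp_processor_id()  */

/* Slow path: only reached when at least one shadow mapping exists. */
static void __invalidate_shadow(void)
{
    int i;
    shadow_mapcnt = 0;
    for ( i = 0; i < SHADOW_SLOTS; i++ )
        shadow_slot[i] = NULL;              /* drop the mapping (refcounts elided) */
}

/* Fast path: the common case (no shadow mappings) costs one compare. */
static inline void invalidate_shadow(void)
{
    if ( shadow_mapcnt != 0 )
        __invalidate_shadow();
}

/* Analogue of a pagetable switch: tear down the shadow, defer the flush. */
static void switch_pagetable(void)
{
    invalidate_shadow();
    flush_pending[this_cpu()] = 1;          /* flushed once, at end of batch */
}

/* Analogue of the end of do_process_page_updates(). */
static void end_of_batch(void)
{
    if ( flush_pending[this_cpu()] )
    {
        flush_pending[this_cpu()] = 0;
        puts("TLB flush (once per batch)");
    }
}

int main(void)
{
    shadow_mapcnt = 3;                      /* pretend three slots are mapped */
    switch_pagetable();
    switch_pagetable();                     /* second call hits the fast path */
    end_of_batch();                         /* exactly one flush is issued    */
    return 0;
}
```

Deferring the flush this way means a run of page-table updates pays for at most one cr3 reload per batch, which is what the __write_cr3_counted() call at the end of do_process_page_updates() provides.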
--- a/xen/include/xeno/sched.h	Tue Apr 15 23:19:11 2003 +0000
+++ b/xen/include/xeno/sched.h	Wed Apr 16 09:49:33 2003 +0000
@@ -30,7 +30,7 @@ struct mm_struct {
     l1_pgentry_t *perdomain_pt;
     pagetable_t pagetable;
     /* Current LDT details. */
-    unsigned long ldt_base, ldt_ents;
+    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
     /* Next entry is passed to LGDT on domain switch. */
     char gdt[6];
 };
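The header change is the only new state the rest of the changeset relies on: the traps.c fault handler increments shadow_ldt_mapcnt for each shadow slot it fills, and __invalidate_shadow_ldt() zeroes it after clearing the slots. An annotated restatement of the resulting struct follows; the typedefs and comments are added here for illustration and are not Xen's real definitions.

```c
/* Annotated restatement of struct mm_struct after this changeset.
 * The two typedefs are stand-ins so the snippet is self-contained;
 * only the shadow_ldt_mapcnt member is new in the real header. */
typedef unsigned long l1_pgentry_t;     /* stand-in for Xen's type */
typedef unsigned long pagetable_t;      /* stand-in for Xen's type */

struct mm_struct {
    l1_pgentry_t *perdomain_pt;
    pagetable_t   pagetable;
    /* Current LDT details. */
    unsigned long ldt_base;             /* base address of the guest's LDT     */
    unsigned long ldt_ents;             /* number of LDT entries (<= 8192)     */
    unsigned long shadow_ldt_mapcnt;    /* live shadow LDT mappings; zero lets
                                           invalidate_shadow_ldt() return
                                           without touching perdomain_pt       */
    /* Next entry is passed to LGDT on domain switch. */
    char gdt[6];
};
```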