ia64/xen-unstable
changeset 101:540e4f4c8e38
bitkeeper revision 1.15.1.10 (3e352084u4K_4YmnX6nhqxaJQrO-hQ)
Merge labyrinth.cl.cam.ac.uk:/usr/groups/xeno/BK/xeno
into labyrinth.cl.cam.ac.uk:/usr/groups/xeno/users/akw27/xeno
author   | akw27@labyrinth.cl.cam.ac.uk
date     | Mon Jan 27 12:05:24 2003 +0000 (2003-01-27)
parents  | ce656d157bbf a8063692097a
children | 033b3540eda0 0ce34da1b61d
files    | .rootkeys xen-2.4.16/Makefile xen-2.4.16/common/domain.c xen-2.4.16/common/domain_page.c xen-2.4.16/common/memory.c xen-2.4.16/include/asm-i386/domain_page.h
line diff
--- a/.rootkeys	Thu Jan 23 11:38:00 2003 +0000
+++ b/.rootkeys	Mon Jan 27 12:05:24 2003 +0000
@@ -39,6 +39,7 @@ 3ddb79bddEYJbcURvqqcx99Yl2iAhQ xen-2.4.1
 3ddb79bdrqnW93GR9gZk1OJe1qK-iQ xen-2.4.16/common/brlock.c
 3ddb79bdLX_P6iB7ILiblRLWvebapg xen-2.4.16/common/dom0_ops.c
 3ddb79bdYO5D8Av12NHqPeSviav7cg xen-2.4.16/common/domain.c
+3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen-2.4.16/common/domain_page.c
 3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen-2.4.16/common/event.c
 3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen-2.4.16/common/kernel.c
 3ddb79bduhSEZI8xa7IbGQCpap5y2A xen-2.4.16/common/lib.c
--- a/xen-2.4.16/Makefile	Thu Jan 23 11:38:00 2003 +0000
+++ b/xen-2.4.16/Makefile	Mon Jan 27 12:05:24 2003 +0000
@@ -24,7 +24,6 @@ clean: delete-links
 	$(MAKE) -C net
 	$(MAKE) -C drivers
 	$(MAKE) -C arch/$(ARCH)
-	gzip -f -9 < $(TARGET) > $(TARGET).gz
 
 make-links:
 	ln -sf xeno include/linux
--- a/xen-2.4.16/common/domain.c	Thu Jan 23 11:38:00 2003 +0000
+++ b/xen-2.4.16/common/domain.c	Mon Jan 27 12:05:24 2003 +0000
@@ -410,7 +410,7 @@ int setup_guestos(struct task_struct *p,
     unsigned int ft_size = 0;
     start_info_t *virt_startinfo_address;
     unsigned long long time;
-    l2_pgentry_t *l2tab;
+    l2_pgentry_t *l2tab, *l2start;
     l1_pgentry_t *l1tab = NULL;
     struct pfn_info *page = NULL;
     net_ring_t *net_ring;
@@ -465,7 +465,7 @@ int setup_guestos(struct task_struct *p,
      * filled in by now !!
      */
     phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
-    l2tab = map_domain_mem(phys_l2tab);
+    l2start = l2tab = map_domain_mem(phys_l2tab);
     memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
     l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
@@ -486,17 +486,16 @@ int setup_guestos(struct task_struct *p,
     if(dom == 0)
         ft_size = frame_table_size;
 
-    phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);
+    l2tab += l2_table_offset(virt_load_address);
     for ( cur_address = start_address;
           cur_address != (end_address + PAGE_SIZE + ft_size);
           cur_address += PAGE_SIZE )
     {
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
+            if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
             phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
-            l2tab = map_domain_mem(phys_l2tab);
-            *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
-            phys_l2tab += sizeof(l2_pgentry_t);
+            *l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
             l1tab = map_domain_mem(phys_l1tab);
             clear_page(l1tab);
             l1tab += l1_table_offset(
@@ -512,43 +511,39 @@ int setup_guestos(struct task_struct *p,
             page->type_count = page->tot_count = 1;
         }
     }
+    unmap_domain_mem(l1tab-1);
 
     /* Pages that are part of page tables must be read-only. */
     vaddr = virt_load_address + alloc_address - start_address;
-    phys_l2tab = pagetable_val(p->mm.pagetable) +
-        (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
-    l2tab = map_domain_mem(phys_l2tab);
-    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
-        (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
-    phys_l2tab += sizeof(l2_pgentry_t);
-    l1tab = map_domain_mem(phys_l1tab);
+    l2tab = l2start + l2_table_offset(vaddr);
+    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1tab += l1_table_offset(vaddr);
+    l2tab++;
     for ( cur_address = alloc_address;
           cur_address != end_address;
           cur_address += PAGE_SIZE )
     {
-        *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
-            l2tab = map_domain_mem(phys_l2tab);
-            phys_l1tab = l2_pgentry_to_phys(*l2tab);
-            phys_l2tab += sizeof(l2_pgentry_t);
-            l1tab = map_domain_mem(phys_l1tab);
+            unmap_domain_mem(l1tab-1);
+            l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+            l2tab++;
        }
+        *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
        page = frame_table + (cur_address >> PAGE_SHIFT);
        page->flags = dom | PGT_l1_page_table;
        page->tot_count++;
     }
+    unmap_domain_mem(l1tab-1);
     page->flags = dom | PGT_l2_page_table;
 
     /* Map in the the shared info structure. */
     virt_shinfo_address = end_address - start_address + virt_load_address;
-    phys_l2tab = pagetable_val(p->mm.pagetable) +
-        (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
-    l2tab = map_domain_mem(phys_l2tab);
-    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
-        (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
-    l1tab = map_domain_mem(phys_l1tab);
+    l2tab = l2start + l2_table_offset(virt_shinfo_address);
+    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1tab += l1_table_offset(virt_shinfo_address);
     *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
+    unmap_domain_mem(l1tab);
 
     /* Set up shared info area. */
     rdtscll(time);
@@ -565,13 +560,11 @@ int setup_guestos(struct task_struct *p,
           cur_address < virt_ftable_end_addr;
           cur_address += PAGE_SIZE)
     {
-        phys_l2tab = pagetable_val(p->mm.pagetable) +
-            (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
-        l2tab = map_domain_mem(phys_l2tab);
-        phys_l1tab = l2_pgentry_to_phys(*l2tab) +
-            (l1_table_offset(cur_address) * sizeof(l1_pgentry_t));
-        l1tab = map_domain_mem(phys_l1tab);
+        l2tab = l2start + l2_table_offset(cur_address);
+        l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+        l1tab += l1_table_offset(cur_address);
         *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
+        unmap_domain_mem(l1tab);
         ft_mapping += PAGE_SIZE;
     }
 }
@@ -580,6 +573,8 @@ int setup_guestos(struct task_struct *p,
         (alloc_address - start_address - PAGE_SIZE + virt_load_address);
     virt_stack_address = (unsigned long)virt_startinfo_address;
 
+    unmap_domain_mem(l2start);
+
     /* Install the new page tables. */
     __cli();
     __write_cr3_counted(pagetable_val(p->mm.pagetable));
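The rewritten loops above keep at most one L1 page mapped at a time: before moving to a new L1 table they release the previous one with unmap_domain_mem(l1tab-1). The "-1" matters because after the final *l1tab++ the pointer may already sit on the first entry of the next page; stepping back one entry guarantees the address handed to unmap_domain_mem() still lies inside the page that was actually mapped. A minimal standalone sketch of that pointer argument follows (plain user-space C, not Xen code; PAGE_SIZE and the entry type are stand-ins):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

typedef unsigned long pte_t;            /* stand-in for l1_pgentry_t */

int main(void)
{
    /* Pretend a page of PTEs was mapped at this page-aligned address. */
    uintptr_t map_va = 0x100000;
    pte_t *l1tab = (pte_t *)map_va;

    /* Advance across every entry, as the setup_guestos() loops do with
     * *l1tab++ (the writes themselves are omitted in this model). */
    for (unsigned long i = 0; i < PAGE_SIZE / sizeof(pte_t); i++)
        l1tab++;

    /* l1tab now points at the first entry of the NEXT page... */
    printf("l1tab     page: %#lx\n", (unsigned long)((uintptr_t)l1tab & PAGE_MASK));
    /* ...but l1tab-1 is still inside the page that was mapped, so it is
     * the right argument for unmap_domain_mem(). */
    printf("l1tab - 1 page: %#lx\n", (unsigned long)((uintptr_t)(l1tab - 1) & PAGE_MASK));
    return 0;
}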
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen-2.4.16/common/domain_page.c	Mon Jan 27 12:05:24 2003 +0000
@@ -0,0 +1,67 @@
+/******************************************************************************
+ * domain_page.h
+ *
+ * Allow temporary mapping of domain pages. Based on ideas from the
+ * Linux PKMAP code -- the copyrights and credits are retained below.
+ */
+
+/*
+ * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
+ *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#include <xeno/config.h>
+#include <xeno/sched.h>
+#include <xeno/mm.h>
+#include <asm/domain_page.h>
+#include <asm/pgalloc.h>
+
+static unsigned int map_idx[NR_CPUS];
+
+/* Use a spare PTE bit to mark entries ready for recycling. */
+#define READY_FOR_TLB_FLUSH (1<<10)
+
+static void flush_all_ready_maps(void)
+{
+    unsigned long *cache = mapcache[smp_processor_id()];
+
+    /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
+    do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
+    while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
+
+    local_flush_tlb();
+}
+
+
+void *map_domain_mem(unsigned long pa)
+{
+    unsigned long va;
+    int cpu = smp_processor_id();
+    unsigned int idx;
+    unsigned long *cache = mapcache[cpu];
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    for ( ; ; )
+    {
+        idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1);
+        if ( idx == 0 ) flush_all_ready_maps();
+        if ( cache[idx] == 0 ) break;
+    }
+
+    cache[idx] = (pa & PAGE_MASK) | PAGE_HYPERVISOR;
+
+    local_irq_restore(flags);
+
+    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
+    return (void *)va;
+}
+
+void unmap_domain_mem(void *va)
+{
+    unsigned int idx;
+    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
+    mapcache[smp_processor_id()][idx] |= READY_FOR_TLB_FLUSH;
+}
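The new allocator behaves like a per-CPU ring of PTE slots: map_domain_mem() claims the next free slot, unmap_domain_mem() merely marks the slot READY_FOR_TLB_FLUSH, and the stale entries are swept out (with a single TLB flush) each time the index wraps back to zero. Below is a hedged, self-contained model of that recycling policy in plain user-space C; the slot array stands in for the real mapcache PTE page and no mapping or TLB work actually happens.

#include <stdio.h>

#define MAPCACHE_ENTRIES 16              /* small ring for the demo */
#define READY_FOR_TLB_FLUSH (1UL << 10)  /* spare bit marks recyclable slots */

static unsigned long cache[MAPCACHE_ENTRIES]; /* stands in for the PTE page */
static unsigned int map_idx;

static void flush_all_ready_maps(void)
{
    /* Sweep slots whose users have finished; one "TLB flush" covers them all. */
    for (int i = 0; i < MAPCACHE_ENTRIES; i++)
        if (cache[i] & READY_FOR_TLB_FLUSH)
            cache[i] = 0;
    printf("-- flushed ready slots --\n");
}

static int map_slot(unsigned long pa)
{
    unsigned int idx;
    for (;;) {
        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
        if (idx == 0)
            flush_all_ready_maps();       /* index wrapped: recycle */
        if (cache[idx] == 0)
            break;                        /* found a free slot */
    }
    cache[idx] = pa;                      /* "install" the mapping */
    return idx;
}

static void unmap_slot(int idx)
{
    cache[idx] |= READY_FOR_TLB_FLUSH;    /* defer the real teardown */
}

int main(void)
{
    for (unsigned long pa = 0x1000; pa < 0x40000; pa += 0x1000) {
        int idx = map_slot(pa);
        printf("pa %#lx -> slot %d\n", pa, idx);
        unmap_slot(idx);                  /* caller is done with the page */
    }
    return 0;
}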
--- a/xen-2.4.16/common/memory.c	Thu Jan 23 11:38:00 2003 +0000
+++ b/xen-2.4.16/common/memory.c	Mon Jan 27 12:05:24 2003 +0000
@@ -182,13 +182,17 @@
 #define MEM_LOG(_f, _a...) ((void)0)
 #endif
 
+/* Domain 0 is allowed to submit requests on behalf of others. */
+#define DOMAIN_OKAY(_f) \
+    ((((_f) & PG_domain_mask) == current->domain) || (current->domain == 0))
+
 /* 'get' checks parameter for validity before inc'ing refcnt. */
 static int get_l2_table(unsigned long page_nr);
 static int get_l1_table(unsigned long page_nr);
 static int get_page(unsigned long page_nr, int writeable);
 static int inc_page_refcnt(unsigned long page_nr, unsigned int type);
 /* 'put' does no checking because if refcnt not zero, entity must be valid. */
-static int put_l2_table(unsigned long page_nr);
+static void put_l2_table(unsigned long page_nr);
 static void put_l1_table(unsigned long page_nr);
 static void put_page(unsigned long page_nr, int writeable);
 static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
@@ -248,14 +252,14 @@ static int inc_page_refcnt(unsigned long
     if ( page_nr >= max_page )
     {
         MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
-        return(-1);
+        return -1;
     }
     page = frame_table + page_nr;
     flags = page->flags;
-    if ( (flags & PG_domain_mask) != current->domain )
+    if ( !DOMAIN_OKAY(flags) )
     {
         MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
-        return(-1);
+        return -1;
     }
     if ( (flags & PG_type_mask) != type )
     {
@@ -264,13 +268,13 @@ static int inc_page_refcnt(unsigned long
             MEM_LOG("Page %08lx bad type/count (%08lx!=%08x) cnt=%ld",
                     page_nr << PAGE_SHIFT,
                     flags & PG_type_mask, type, page_type_count(page));
-            return(-1);
+            return -1;
         }
         page->flags |= type;
     }
 
     get_page_tot(page);
-    return(get_page_type(page));
+    return get_page_type(page);
 }
 
 /* Return new refcnt, or -1 on error. */
@@ -282,21 +286,46 @@ static int dec_page_refcnt(unsigned long
     if ( page_nr >= max_page )
     {
         MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
-        return(-1);
+        return -1;
     }
     page = frame_table + page_nr;
-    if ( (page->flags & (PG_type_mask | PG_domain_mask)) !=
-         (type | current->domain) )
+    if ( !DOMAIN_OKAY(page->flags) ||
+         ((page->flags & PG_type_mask) != type) )
     {
         MEM_LOG("Bad page type/domain (dom=%ld) (type %ld != expected %d)",
                 page->flags & PG_domain_mask, page->flags & PG_type_mask,
                 type);
-        return(-1);
+        return -1;
     }
     ASSERT(page_type_count(page) != 0);
     if ( (ret = put_page_type(page)) == 0 ) page->flags &= ~PG_type_mask;
     put_page_tot(page);
-    return(ret);
+    return ret;
+}
+
+
+/* We allow a L2 table to map itself, to achieve a linear pagetable. */
+/* NB. There's no need for a put_twisted_l2_table() function!! */
+static int get_twisted_l2_table(unsigned long entry_pfn, l2_pgentry_t l2e)
+{
+    unsigned long l2v = l2_pgentry_val(l2e);
+
+    /* Clearly the mapping must be read-only :-) */
+    if ( (l2v & _PAGE_RW) )
+    {
+        MEM_LOG("Attempt to install twisted L2 entry with write permissions");
+        return -1;
+    }
+
+    /* This is a sufficient final check. */
+    if ( (l2v >> PAGE_SHIFT) != entry_pfn )
+    {
+        MEM_LOG("L2 tables may not map _other_ L2 tables!\n");
+        return -1;
+    }
+
+    /* We don't bump the reference counts. */
+    return 0;
 }
 
 
@@ -306,7 +335,7 @@ static int get_l2_table(unsigned long pa
     int i, ret=0;
 
     ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
-    if ( ret != 0 ) return((ret < 0) ? ret : 0);
+    if ( ret != 0 ) return (ret < 0) ? ret : 0;
 
     /* NEW level-2 page table! Deal with every PDE in the table. */
     p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -318,12 +347,13 @@ static int get_l2_table(unsigned long pa
         {
             MEM_LOG("Bad L2 page type settings %04lx",
                     l2_pgentry_val(l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE));
-            return(-1);
+            ret = -1;
+            goto out;
         }
+        /* Assume we're mapping an L1 table, falling back to twisted L2. */
         ret = get_l1_table(l2_pgentry_to_pagenr(l2_entry));
-        if ( ret ) return(ret);
-        p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
-                                    ((i+1) * sizeof(l2_pgentry_t)));
+        if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
+        if ( ret ) goto out;
     }
 
     /* Now we simply slap in our high mapping. */
@@ -334,7 +364,9 @@ static int get_l2_table(unsigned long pa
         DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
         mk_l2_pgentry(__pa(current->mm.perdomain_pt) | __PAGE_HYPERVISOR);
 
-    return(ret);
+ out:
+    unmap_domain_mem(p_l2_entry);
+    return ret;
 }
 
 static int get_l1_table(unsigned long page_nr)
@@ -344,7 +376,7 @@ static int get_l1_table(unsigned long pa
 
     /* Update ref count for page pointed at by PDE. */
     ret = inc_page_refcnt(page_nr, PGT_l1_page_table);
-    if ( ret != 0 ) return((ret < 0) ? ret : 0);
+    if ( ret != 0 ) return (ret < 0) ? ret : 0;
 
     /* NEW level-1 page table! Deal with every PTE in the table. */
     p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -358,14 +390,18 @@ static int get_l1_table(unsigned long pa
             MEM_LOG("Bad L1 page type settings %04lx",
                     l1_pgentry_val(l1_entry) &
                     (_PAGE_GLOBAL|_PAGE_PAT));
-            return(-1);
+            ret = -1;
+            goto out;
         }
         ret = get_page(l1_pgentry_to_pagenr(l1_entry),
                        l1_pgentry_val(l1_entry) & _PAGE_RW);
-        if ( ret ) return(ret);
+        if ( ret ) goto out;
     }
 
-    return(ret);
+ out:
+    /* Make sure we unmap the right page! */
+    unmap_domain_mem(p_l1_entry-1);
+    return ret;
 }
 
 static int get_page(unsigned long page_nr, int writeable)
@@ -381,7 +417,7 @@ static int get_page(unsigned long page_n
     }
     page = frame_table + page_nr;
     flags = page->flags;
-    if ( (flags & PG_domain_mask) != current->domain )
+    if ( !DOMAIN_OKAY(flags) )
    {
         MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
         return(-1);
@@ -408,28 +444,23 @@ static int get_page(unsigned long page_n
     return(0);
 }
 
-static int put_l2_table(unsigned long page_nr)
+static void put_l2_table(unsigned long page_nr)
 {
     l2_pgentry_t *p_l2_entry, l2_entry;
-    int i, ret;
+    int i;
 
-    ret = dec_page_refcnt(page_nr, PGT_l2_page_table);
-    if ( ret != 0 ) return((ret < 0) ? ret : 0);
+    if ( dec_page_refcnt(page_nr, PGT_l2_page_table) ) return;
 
     /* We had last reference to level-2 page table. Free the PDEs. */
     p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
-    for ( i = 0; i < HYPERVISOR_ENTRIES_PER_L2_PAGETABLE; i++ )
+    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
     {
         l2_entry = *p_l2_entry++;
         if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
-        {
             put_l1_table(l2_pgentry_to_pagenr(l2_entry));
-            p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
-                                        ((i+1) * sizeof(l2_pgentry_t)));
-        }
     }
 
-    return(0);
+    unmap_domain_mem(p_l2_entry);
 }
 
 static void put_l1_table(unsigned long page_nr)
@@ -437,7 +468,7 @@ static void put_l1_table(unsigned long p
     l1_pgentry_t *p_l1_entry, l1_entry;
     int i;
 
-    if ( dec_page_refcnt(page_nr, PGT_l1_page_table) != 0 ) return;
+    if ( dec_page_refcnt(page_nr, PGT_l1_page_table) ) return;
 
     /* We had last reference to level-1 page table. Free the PTEs. */
     p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -450,6 +481,9 @@ static void put_l1_table(unsigned long p
                      l1_pgentry_val(l1_entry) & _PAGE_RW);
         }
     }
+
+    /* Make sure we unmap the right page! */
+    unmap_domain_mem(p_l1_entry-1);
 }
 
 static void put_page(unsigned long page_nr, int writeable)
@@ -457,7 +491,7 @@ static void put_page(unsigned long page_
     struct pfn_info *page;
     ASSERT(page_nr < max_page);
     page = frame_table + page_nr;
-    ASSERT((page->flags & PG_domain_mask) == current->domain);
+    ASSERT(DOMAIN_OKAY(page->flags));
     ASSERT((!writeable) ||
            ((page_type_count(page) != 0) &&
             ((page->flags & PG_type_mask) == PGT_writeable_page)));
@@ -485,12 +519,6 @@ static int mod_l2_entry(unsigned long pa
             goto fail;
         }
 
-        /*
-         * Write the new value while pointer is still valid. The mapping cache
-         * entry for p_l2_entry may get clobbered by {put,get}_l1_table.
-         */
-        *p_l2_entry = new_l2_entry;
-
         if ( (l2_pgentry_val(new_l2_entry) & _PAGE_PRESENT) )
         {
             if ( (l2_pgentry_val(new_l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE)) )
@@ -509,7 +537,9 @@ static int mod_l2_entry(unsigned long pa
                 put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
             }
 
-            if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) )
+            /* Assume we're mapping an L1 table, falling back to twisted L2. */
+            if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
+                 get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
                 goto fail;
         }
     }
@@ -518,16 +548,13 @@ static int mod_l2_entry(unsigned long pa
         put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
     }
 
-    return(0);
+    *p_l2_entry = new_l2_entry;
+    unmap_domain_mem(p_l2_entry);
+    return 0;
 
  fail:
-    /*
-     * On failure we put the old value back. We need to regrab the
-     * mapping of the physical page frame.
-     */
-    p_l2_entry = map_domain_mem(pa);
-    *p_l2_entry = old_l2_entry;
-    return(-1);
+    unmap_domain_mem(p_l2_entry);
+    return -1;
 }
 
 
@@ -572,12 +599,13 @@ static int mod_l1_entry(unsigned long pa
                      l1_pgentry_val(old_l1_entry) & _PAGE_RW);
     }
 
-    /* p_l1_entry is still valid here */
     *p_l1_entry = new_l1_entry;
+    unmap_domain_mem(p_l1_entry);
+    return 0;
 
-    return(0);
 fail:
-    return(-1);
+    unmap_domain_mem(p_l1_entry);
+    return -1;
 }
 
 
@@ -615,7 +643,7 @@ static int do_extended_command(unsigned
         break;
 
     case PGEXT_UNPIN_TABLE:
-        if ( (page->flags & PG_domain_mask) != current->domain )
+        if ( !DOMAIN_OKAY(page->flags) )
        {
             err = 1;
             MEM_LOG("Page %08lx bad domain (dom=%ld)",
@@ -701,7 +729,7 @@ int do_process_page_updates(page_update_
         case PGREQ_NORMAL:
             page = frame_table + pfn;
             flags = page->flags;
-            if ( (flags & PG_domain_mask) == current->domain )
+            if ( DOMAIN_OKAY(flags) )
             {
                 switch ( (flags & PG_type_mask) )
                 {
@@ -731,8 +759,9 @@ int do_process_page_updates(page_update_
             flags = page->flags;
             if ( (flags | current->domain) == PGT_l1_page_table )
             {
-
-                *(unsigned long *)map_domain_mem(cur.ptr) = cur.val;
+                unsigned long *va = map_domain_mem(cur.ptr);
+                *va = cur.val;
+                unmap_domain_mem(va);
                 err = 0;
             }
             else
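The new get_twisted_l2_table() admits exactly one kind of self-reference: an L2 entry that points back at the L2 table containing it, and only read-only, which gives the guest a linear ("twisted") pagetable view without taking any extra reference counts. A hedged standalone sketch of just that validation rule follows; the constants and the pfn/entry encoding are simplified stand-ins, not the hypervisor's real types.

#include <stdio.h>

#define PAGE_SHIFT    12
#define _PAGE_PRESENT 0x001UL
#define _PAGE_RW      0x002UL

/* Return 0 if 'l2e', installed in the L2 table living at frame 'entry_pfn',
 * is an acceptable self-mapping; -1 otherwise. */
static int check_twisted_l2(unsigned long entry_pfn, unsigned long l2e)
{
    if (l2e & _PAGE_RW)
        return -1;                      /* linear mapping must be read-only */
    if ((l2e >> PAGE_SHIFT) != entry_pfn)
        return -1;                      /* may only point back at itself */
    return 0;                           /* no refcount is taken */
}

int main(void)
{
    unsigned long pfn = 0x1234;
    printf("%d\n", check_twisted_l2(pfn, (pfn << PAGE_SHIFT) | _PAGE_PRESENT));            /* 0  */
    printf("%d\n", check_twisted_l2(pfn, (pfn << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW)); /* -1 */
    printf("%d\n", check_twisted_l2(pfn, ((pfn + 1) << PAGE_SHIFT) | _PAGE_PRESENT));      /* -1 */
    return 0;
}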
--- a/xen-2.4.16/include/asm-i386/domain_page.h	Thu Jan 23 11:38:00 2003 +0000
+++ b/xen-2.4.16/include/asm-i386/domain_page.h	Mon Jan 27 12:05:24 2003 +0000
@@ -9,6 +9,21 @@
 
 extern unsigned long *mapcache[NR_CPUS];
 #define MAPCACHE_ENTRIES 1024
+
+/*
+ * Maps a given physical address, returning corresponding virtual address.
+ * The entire page containing that VA is now accessible until a
+ * corresponding call to unmap_domain_mem().
+ */
+extern void *map_domain_mem(unsigned long pa);
+
+/*
+ * Pass a VA within a page previously mapped with map_domain_mem().
+ * That page will then be removed from the mapping lists.
+ */
+extern void unmap_domain_mem(void *va);
+
+#if 0
 #define MAPCACHE_HASH(_pfn) ((_pfn) & (MAPCACHE_ENTRIES-1))
 static inline void *map_domain_mem(unsigned long pa)
 {
@@ -25,3 +40,4 @@ static inline void *map_domain_mem(unsig
     }
     return va;
 }
+#endif
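The old inline version now under #if 0 simply hashed the pfn into the mapcache and never released anything, so callers had nothing to pair; the new interface makes every mapping explicit, and the rest of this changeset converts callers to a strict map/use/unmap bracket. A minimal illustration of that calling pattern, with stub implementations so it compiles outside the hypervisor (the stubs are obviously not the real functions):

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for the hypervisor interface declared above. */
static void *map_domain_mem(unsigned long pa)
{
    printf("map   %#lx\n", pa);
    return calloc(1, 4096);              /* pretend this is the mapped page */
}

static void unmap_domain_mem(void *va)
{
    printf("unmap %p\n", va);
    free(va);
}

int main(void)
{
    unsigned long pa = 0x200000;          /* some frame we want to touch */

    /* The idiom the changeset enforces: bracket every access. */
    unsigned long *va = map_domain_mem(pa);
    va[0] = 42;                           /* ...use the mapping... */
    unmap_domain_mem(va);                 /* ...and release the slot. */
    return 0;
}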