direct-io.hg
changeset 3477:d331c6994d28
bitkeeper revision 1.1159.223.12 (41f14d3cE4GADmEAEr6XE9nXX4dyGw)
Common-code cleanups. Moved arch-specific code out into arch/x86
and asm-x86.
author | kaf24@scramble.cl.cam.ac.uk
---|---
date | Fri Jan 21 18:43:08 2005 +0000 (2005-01-21)
parents | 439f4f511898
children | 46c14b1a4351 67c3042307d4
files | xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/memory.c xen/common/Makefile xen/common/dom0_ops.c xen/common/domain.c xen/common/elf.c xen/common/keyhandler.c xen/common/schedule.c xen/common/softirq.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-x86/mm.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/xen.h xen/include/xen/domain.h xen/include/xen/grant_table.h xen/include/xen/slab.h
line diff
--- a/xen/arch/x86/dom0_ops.c   Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/arch/x86/dom0_ops.c   Fri Jan 21 18:43:08 2005 +0000
@@ -137,6 +137,164 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     }
     break;
 
+    case DOM0_IOPL:
+    {
+        extern long do_iopl(domid_t, unsigned int);
+        ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
+    }
+    break;
+
+    case DOM0_PHYSINFO:
+    {
+        dom0_physinfo_t *pi = &op->u.physinfo;
+
+        pi->ht_per_core = opt_noht ? 1 : ht_per_core;
+        pi->cores = smp_num_cpus / pi->ht_per_core;
+        pi->total_pages = max_page;
+        pi->free_pages = avail_domheap_pages();
+        pi->cpu_khz = cpu_khz;
+
+        copy_to_user(u_dom0_op, op, sizeof(*op));
+        ret = 0;
+    }
+    break;
+
+    case DOM0_GETPAGEFRAMEINFO:
+    {
+        struct pfn_info *page;
+        unsigned long pfn = op->u.getpageframeinfo.pfn;
+        domid_t dom = op->u.getpageframeinfo.domain;
+        struct domain *d;
+
+        ret = -EINVAL;
+
+        if ( unlikely(pfn >= max_page) ||
+             unlikely((d = find_domain_by_id(dom)) == NULL) )
+            break;
+
+        page = &frame_table[pfn];
+
+        if ( likely(get_page(page, d)) )
+        {
+            ret = 0;
+
+            op->u.getpageframeinfo.type = NOTAB;
+
+            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
+            {
+                switch ( page->u.inuse.type_info & PGT_type_mask )
+                {
+                case PGT_l1_page_table:
+                    op->u.getpageframeinfo.type = L1TAB;
+                    break;
+                case PGT_l2_page_table:
+                    op->u.getpageframeinfo.type = L2TAB;
+                    break;
+                case PGT_l3_page_table:
+                    op->u.getpageframeinfo.type = L3TAB;
+                    break;
+                case PGT_l4_page_table:
+                    op->u.getpageframeinfo.type = L4TAB;
+                    break;
+                }
+            }
+
+            put_page(page);
+        }
+
+        put_domain(d);
+
+        copy_to_user(u_dom0_op, op, sizeof(*op));
+    }
+    break;
+
+    case DOM0_GETPAGEFRAMEINFO2:
+    {
+#define GPF2_BATCH 128
+        int n,j;
+        int num = op->u.getpageframeinfo2.num;
+        domid_t dom = op->u.getpageframeinfo2.domain;
+        unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
+        struct domain *d;
+        unsigned long l_arr[GPF2_BATCH];
+        ret = -ESRCH;
+
+        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
+            break;
+
+        if ( unlikely(num > 1024) )
+        {
+            ret = -E2BIG;
+            break;
+        }
+
+        ret = 0;
+        for( n = 0; n < num; )
+        {
+            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
+
+            if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
+            {
+                ret = -EINVAL;
+                break;
+            }
+
+            for( j = 0; j < k; j++ )
+            {
+                struct pfn_info *page;
+                unsigned long mfn = l_arr[j];
+
+                if ( unlikely(mfn >= max_page) )
+                    goto e2_err;
+
+                page = &frame_table[mfn];
+
+                if ( likely(get_page(page, d)) )
+                {
+                    unsigned long type = 0;
+
+                    switch( page->u.inuse.type_info & PGT_type_mask )
+                    {
+                    case PGT_l1_page_table:
+                        type = L1TAB;
+                        break;
+                    case PGT_l2_page_table:
+                        type = L2TAB;
+                        break;
+                    case PGT_l3_page_table:
+                        type = L3TAB;
+                        break;
+                    case PGT_l4_page_table:
+                        type = L4TAB;
+                        break;
+                    }
+
+                    if ( page->u.inuse.type_info & PGT_pinned )
+                        type |= LPINTAB;
+                    l_arr[j] |= type;
+                    put_page(page);
+                }
+                else
+                {
+                e2_err:
+                    l_arr[j] |= XTAB;
+                }
+
+            }
+
+            if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
+            {
+                ret = -EINVAL;
+                break;
+            }
+
+            n += j;
+        }
+
+        put_domain(d);
+    }
+    break;
+
     default:
         ret = -ENOSYS;
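The DOM0_GETPAGEFRAMEINFO2 handler moved in above is a good example of the batched user-copy idiom: a caller-supplied array of arbitrary length is funnelled through a small fixed-size on-stack buffer so kernel stack usage stays bounded. A minimal standalone sketch of the idiom, with memcpy() standing in for copy_from_user()/copy_to_user() and the per-frame type lookup faked:

```c
/* Standalone sketch of the batching idiom used by DOM0_GETPAGEFRAMEINFO2:
 * a large caller-supplied array is processed through a small fixed-size
 * on-stack buffer, bounding stack usage.  memcpy() stands in for
 * copy_from_user()/copy_to_user(); the "type lookup" is faked. */
#include <stdio.h>
#include <string.h>

#define BATCH 128

static unsigned long lookup_type(unsigned long mfn)
{
    return (mfn & 1) ? 0x10000000UL : 0;   /* fake per-frame flag */
}

static int process_array(unsigned long *user_arr, int num)
{
    unsigned long batch[BATCH];
    int n, j;

    for ( n = 0; n < num; )
    {
        int k = ((num - n) > BATCH) ? BATCH : (num - n);

        memcpy(batch, &user_arr[n], k * sizeof(unsigned long)); /* copy_from_user */

        for ( j = 0; j < k; j++ )
            batch[j] |= lookup_type(batch[j]);

        memcpy(&user_arr[n], batch, k * sizeof(unsigned long)); /* copy_to_user */

        n += k;
    }
    return 0;
}

int main(void)
{
    unsigned long arr[300];
    for ( int i = 0; i < 300; i++ )
        arr[i] = (unsigned long)i;
    process_array(arr, 300);
    printf("arr[1] = %#lx\n", arr[1]);
    return 0;
}
```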
--- a/xen/arch/x86/domain.c     Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/arch/x86/domain.c     Fri Jan 21 18:43:08 2005 +0000
@@ -196,6 +196,48 @@ void machine_halt(void)
     __machine_halt(NULL);
 }
 
+void dump_pageframe_info(struct domain *d)
+{
+    struct pfn_info *page;
+    struct list_head *ent;
+
+    if ( d->tot_pages < 10 )
+    {
+        list_for_each ( ent, &d->page_list )
+        {
+            page = list_entry(ent, struct pfn_info, list);
+            printk("Page %08x: caf=%08x, taf=%08x\n",
+                   page_to_phys(page), page->count_info,
+                   page->u.inuse.type_info);
+        }
+    }
+
+    page = virt_to_page(d->shared_info);
+    printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
+           page_to_phys(page), page->count_info,
+           page->u.inuse.type_info);
+}
+
+xmem_cache_t *domain_struct_cachep;
+void __init domain_startofday(void)
+{
+    domain_struct_cachep = xmem_cache_create(
+        "domain_cache", sizeof(struct domain),
+        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+    if ( domain_struct_cachep == NULL )
+        panic("No slab cache for domain structs.");
+}
+
+struct domain *arch_alloc_domain_struct(void)
+{
+    return xmem_cache_alloc(domain_struct_cachep);
+}
+
+void arch_free_domain_struct(struct domain *d)
+{
+    xmem_cache_free(domain_struct_cachep, d);
+}
+
 void free_perdomain_pt(struct domain *d)
 {
     free_xenheap_page((unsigned long)d->mm.perdomain_pt);
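This hunk is the heart of the changeset: the domain-struct slab cache moves out of common code, and common code now reaches it only through arch_alloc_domain_struct()/arch_free_domain_struct(). A minimal standalone sketch of the hook pattern, with malloc/free standing in for Xen's xmem_cache_* slab calls and a stub struct domain:

```c
/* Minimal standalone sketch of the arch-hook pattern this changeset
 * introduces: common code allocates domain structs only through
 * arch_alloc_domain_struct()/arch_free_domain_struct(), so each
 * architecture can choose its own backing allocator.  malloc/free stand
 * in for Xen's xmem_cache_* calls; 'struct domain' is a stub. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct domain { int domid; };            /* stub for illustration */

/* --- arch side: owns the allocation policy ----------------------- */
struct domain *arch_alloc_domain_struct(void)
{
    return malloc(sizeof(struct domain)); /* xmem_cache_alloc() in Xen */
}

void arch_free_domain_struct(struct domain *d)
{
    free(d);                              /* xmem_cache_free() in Xen */
}

/* --- common side: no allocator knowledge left here --------------- */
struct domain *alloc_domain_struct(void)
{
    struct domain *d = arch_alloc_domain_struct();
    if ( d != NULL )
        memset(d, 0, sizeof(*d));
    return d;
}

int main(void)
{
    struct domain *d = alloc_domain_struct();
    if ( d == NULL )
        return 1;
    d->domid = 7;
    printf("allocated domain %d via arch hook\n", d->domid);
    arch_free_domain_struct(d);
    return 0;
}
```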
--- a/xen/arch/x86/memory.c     Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/arch/x86/memory.c     Fri Jan 21 18:43:08 2005 +0000
@@ -1325,7 +1325,7 @@ int do_mmu_update(
     u32 type_info;
     domid_t domid;
 
-    cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+    cleanup_writable_pagetable(d);
 
     /*
      * If we are resuming after preemption, read how much work we have already
@@ -1552,7 +1552,7 @@ int do_update_va_mapping(unsigned long p
     if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
         return -EINVAL;
 
-    cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+    cleanup_writable_pagetable(d);
 
     /*
      * XXX When we make this support 4MB superpages we should also deal with
--- a/xen/common/Makefile       Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/Makefile       Fri Jan 21 18:43:08 2005 +0000
@@ -2,6 +2,8 @@
 include $(BASEDIR)/Rules.mk
 
 ifeq ($(TARGET_ARCH),ia64)
+OBJS := $(subst dom_mem_ops.o,,$(OBJS))
+OBJS := $(subst grant_table.o,,$(OBJS))
 OBJS := $(subst page_alloc.o,,$(OBJS))
 OBJS := $(subst slab.o,,$(OBJS))
 endif
--- a/xen/common/dom0_ops.c     Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/dom0_ops.c     Fri Jan 21 18:43:08 2005 +0000
@@ -390,62 +390,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     }
     break;
 
-    case DOM0_GETPAGEFRAMEINFO:
-    {
-        struct pfn_info *page;
-        unsigned long pfn = op->u.getpageframeinfo.pfn;
-        domid_t dom = op->u.getpageframeinfo.domain;
-        struct domain *d;
-
-        ret = -EINVAL;
-
-        if ( unlikely(pfn >= max_page) ||
-             unlikely((d = find_domain_by_id(dom)) == NULL) )
-            break;
-
-        page = &frame_table[pfn];
-
-        if ( likely(get_page(page, d)) )
-        {
-            ret = 0;
-
-            op->u.getpageframeinfo.type = NOTAB;
-
-            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
-            {
-                switch ( page->u.inuse.type_info & PGT_type_mask )
-                {
-                case PGT_l1_page_table:
-                    op->u.getpageframeinfo.type = L1TAB;
-                    break;
-                case PGT_l2_page_table:
-                    op->u.getpageframeinfo.type = L2TAB;
-                    break;
-                case PGT_l3_page_table:
-                    op->u.getpageframeinfo.type = L3TAB;
-                    break;
-                case PGT_l4_page_table:
-                    op->u.getpageframeinfo.type = L4TAB;
-                    break;
-                }
-            }
-
-            put_page(page);
-        }
-
-        put_domain(d);
-
-        copy_to_user(u_dom0_op, op, sizeof(*op));
-    }
-    break;
-
-    case DOM0_IOPL:
-    {
-        extern long do_iopl(domid_t, unsigned int);
-        ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
-    }
-    break;
-
 #ifdef XEN_DEBUGGER
     case DOM0_DEBUG:
     {
@@ -482,21 +426,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     }
     break;
 
-    case DOM0_PHYSINFO:
-    {
-        dom0_physinfo_t *pi = &op->u.physinfo;
-
-        pi->ht_per_core = opt_noht ? 1 : ht_per_core;
-        pi->cores = smp_num_cpus / pi->ht_per_core;
-        pi->total_pages = max_page;
-        pi->free_pages = avail_domheap_pages();
-        pi->cpu_khz = cpu_khz;
-
-        copy_to_user(u_dom0_op, op, sizeof(*op));
-        ret = 0;
-    }
-    break;
-
     case DOM0_PCIDEV_ACCESS:
     {
         extern int physdev_pci_access_modify(domid_t, int, int, int, int);
@@ -549,93 +478,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     }
     break;
 
-    case DOM0_GETPAGEFRAMEINFO2:
-    {
-#define GPF2_BATCH 128
-        int n,j;
-        int num = op->u.getpageframeinfo2.num;
-        domid_t dom = op->u.getpageframeinfo2.domain;
-        unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
-        struct domain *d;
-        unsigned long l_arr[GPF2_BATCH];
-        ret = -ESRCH;
-
-        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
-            break;
-
-        if ( unlikely(num > 1024) )
-        {
-            ret = -E2BIG;
-            break;
-        }
-
-        ret = 0;
-        for( n = 0; n < num; )
-        {
-            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
-
-            if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
-            {
-                ret = -EINVAL;
-                break;
-            }
-
-            for( j = 0; j < k; j++ )
-            {
-                struct pfn_info *page;
-                unsigned long mfn = l_arr[j];
-
-                if ( unlikely(mfn >= max_page) )
-                    goto e2_err;
-
-                page = &frame_table[mfn];
-
-                if ( likely(get_page(page, d)) )
-                {
-                    unsigned long type = 0;
-
-                    switch( page->u.inuse.type_info & PGT_type_mask )
-                    {
-                    case PGT_l1_page_table:
-                        type = L1TAB;
-                        break;
-                    case PGT_l2_page_table:
-                        type = L2TAB;
-                        break;
-                    case PGT_l3_page_table:
-                        type = L3TAB;
-                        break;
-                    case PGT_l4_page_table:
-                        type = L4TAB;
-                        break;
-                    }
-
-                    if ( page->u.inuse.type_info & PGT_pinned )
-                        type |= LPINTAB;
-                    l_arr[j] |= type;
-                    put_page(page);
-                }
-                else
-                {
-                e2_err:
-                    l_arr[j] |= XTAB;
-                }
-
-            }
-
-            if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
-            {
-                ret = -EINVAL;
-                break;
-            }
-
-            n += j;
-        }
-
-        put_domain(d);
-    }
-    break;
-
     case DOM0_SETDOMAINVMASSIST:
     {
         struct domain *d;
--- a/xen/common/domain.c       Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/domain.c       Fri Jan 21 18:43:08 2005 +0000
@@ -22,18 +22,8 @@ rwlock_t domlist_lock = RW_LOCK_UNLOCKED
 struct domain *domain_hash[DOMAIN_HASH_SIZE];
 struct domain *domain_list;
 
-xmem_cache_t *domain_struct_cachep;
 struct domain *dom0;
 
-void __init domain_startofday(void)
-{
-    domain_struct_cachep = xmem_cache_create(
-        "domain_cache", sizeof(struct domain),
-        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-    if ( domain_struct_cachep == NULL )
-        panic("No slab cache for domain structs.");
-}
-
 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
 {
     struct domain *d, **pd;
@@ -203,8 +193,8 @@ unsigned int alloc_new_dom_mem(struct do
             return -ENOMEM;
         }
 
-        /* initialise to machine_to_phys_mapping table to likely pfn */
-        machine_to_phys_mapping[page-frame_table] = alloc_pfns;
+        /* Initialise the machine-to-phys mapping for this page. */
+        set_machinetophys(page_to_pfn(page), alloc_pfns);
     }
 
     return 0;
--- a/xen/common/elf.c  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/elf.c  Fri Jan 21 18:43:08 2005 +0000
@@ -10,6 +10,14 @@
 #include <xen/mm.h>
 #include <xen/elf.h>
 
+#ifdef CONFIG_X86
+#define FORCE_XENELF_IMAGE 1
+#define ELF_ADDR p_vaddr
+#elif defined(__ia64__)
+#define FORCE_XENELF_IMAGE 0
+#define ELF_ADDR p_paddr
+#endif
+
 static inline int is_loadable_phdr(Elf_Phdr *phdr)
 {
     return ((phdr->p_type == PT_LOAD) &&
@@ -84,7 +92,9 @@ int parseelfimage(char *elfbase,
     if ( guestinfo == NULL )
     {
         printk("Not a Xen-ELF image: '__xen_guest' section not found.\n");
+#ifndef FORCE_XENELF_IMAGE
         return -EINVAL;
+#endif
     }
 
     for ( h = 0; h < ehdr->e_phnum; h++ )
@@ -92,10 +102,10 @@ int parseelfimage(char *elfbase,
         phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
         if ( !is_loadable_phdr(phdr) )
             continue;
-        if ( phdr->p_vaddr < kernstart )
-            kernstart = phdr->p_vaddr;
-        if ( (phdr->p_vaddr + phdr->p_memsz) > kernend )
-            kernend = phdr->p_vaddr + phdr->p_memsz;
+        if ( phdr->ELF_ADDR < kernstart )
+            kernstart = phdr->ELF_ADDR;
+        if ( (phdr->ELF_ADDR + phdr->p_memsz) > kernend )
+            kernend = phdr->ELF_ADDR + phdr->p_memsz;
     }
 
     if ( (kernstart > kernend) ||
@@ -107,11 +117,15 @@ int parseelfimage(char *elfbase,
     }
 
     dsi->v_start = kernstart;
-    if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
-        dsi->v_start = simple_strtoul(p+10, &p, 0);
 
-    if ( (p = strstr(guestinfo, "PT_MODE_WRITABLE")) != NULL )
-        dsi->use_writable_pagetables = 1;
+    if ( guestinfo != NULL )
+    {
+        if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
+            dsi->v_start = simple_strtoul(p+10, &p, 0);
+
+        if ( (p = strstr(guestinfo, "PT_MODE_WRITABLE")) != NULL )
+            dsi->use_writable_pagetables = 1;
+    }
 
     dsi->v_kernstart = kernstart;
     dsi->v_kernend = kernend;
@@ -132,10 +146,10 @@ int loadelfimage(char *elfbase)
         if ( !is_loadable_phdr(phdr) )
             continue;
         if ( phdr->p_filesz != 0 )
-            memcpy((char *)phdr->p_vaddr, elfbase + phdr->p_offset,
+            memcpy((char *)phdr->ELF_ADDR, elfbase + phdr->p_offset,
                    phdr->p_filesz);
         if ( phdr->p_memsz > phdr->p_filesz )
-            memset((char *)phdr->p_vaddr + phdr->p_filesz, 0,
+            memset((char *)phdr->ELF_ADDR + phdr->p_filesz, 0,
                    phdr->p_memsz - phdr->p_filesz);
     }
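The ELF_ADDR macro lets the common loader stay arch-neutral: x86 lays the image out by p_vaddr while ia64 uses p_paddr. A compilable toy version of the field-selection trick, with a cut-down stand-in for Elf_Phdr:

```c
/* Minimal sketch of the ELF_ADDR trick from xen/common/elf.c: a macro
 * picks which program-header field names a segment's load address, so
 * the common loader loop stays arch-neutral.  The Phdr struct here is
 * a cut-down stand-in for the real Elf_Phdr. */
#include <stdio.h>

typedef struct {
    unsigned long p_vaddr;   /* virtual load address  */
    unsigned long p_paddr;   /* physical load address */
    unsigned long p_memsz;
} Phdr;

#if defined(__ia64__)
#define ELF_ADDR p_paddr     /* ia64 loads by physical address */
#else
#define ELF_ADDR p_vaddr     /* x86 loads by virtual address   */
#endif

int main(void)
{
    Phdr phdr = { 0xc0100000UL, 0x00100000UL, 0x200000UL };
    unsigned long start = phdr.ELF_ADDR;
    unsigned long end   = phdr.ELF_ADDR + phdr.p_memsz;
    printf("segment spans [%#lx, %#lx)\n", start, end);
    return 0;
}
```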
--- a/xen/common/keyhandler.c   Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/keyhandler.c   Fri Jan 21 18:43:08 2005 +0000
@@ -97,8 +97,6 @@ void do_task_queues(unsigned char key)
 {
     struct domain *d;
     s_time_t now = NOW();
-    struct list_head *ent;
-    struct pfn_info *page;
 
     printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
            (u32)(now>>32), (u32)now);
@@ -113,21 +111,7 @@ void do_task_queues(unsigned char key)
                test_bit(DF_RUNNING, &d->flags) ? 'T':'F', d->flags,
                atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
 
-        if ( d->tot_pages < 10 )
-        {
-            list_for_each ( ent, &d->page_list )
-            {
-                page = list_entry(ent, struct pfn_info, list);
-                printk("Page %08x: caf=%08x, taf=%08x\n",
-                       page_to_phys(page), page->count_info,
-                       page->u.inuse.type_info);
-            }
-        }
-
-        page = virt_to_page(d->shared_info);
-        printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
-               page_to_phys(page), page->count_info,
-               page->u.inuse.type_info);
+        dump_pageframe_info(d);
 
         printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
                d->shared_info->vcpu_data[0].evtchn_upcall_pending,
--- a/xen/common/schedule.c     Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/schedule.c     Fri Jan 21 18:43:08 2005 +0000
@@ -89,26 +89,24 @@ static struct scheduler ops;
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
 static struct ac_timer t_timer[NR_CPUS];
 
-extern xmem_cache_t *domain_struct_cachep;
-
 void free_domain_struct(struct domain *d)
 {
     SCHED_OP(free_task, d);
-    xmem_cache_free(domain_struct_cachep, d);
+    arch_free_domain_struct(d);
 }
 
 struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
 
-    if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
+    if ( (d = arch_alloc_domain_struct()) == NULL )
         return NULL;
 
     memset(d, 0, sizeof(*d));
 
     if ( SCHED_OP(alloc_task, d) < 0 )
     {
-        xmem_cache_free(domain_struct_cachep, d);
+        arch_free_domain_struct(d);
         return NULL;
     }
 
@@ -320,8 +318,7 @@ void __enter_scheduler(void)
     task_slice_t next_slice;
     s32 r_time;     /* time for new dom to run */
 
-    cleanup_writable_pagetable(
-        prev, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+    cleanup_writable_pagetable(prev);
 
     perfc_incrc(sched_run);
--- a/xen/common/softirq.c      Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/common/softirq.c      Fri Jan 21 18:43:08 2005 +0000
@@ -15,7 +15,9 @@
 #include <xen/sched.h>
 #include <xen/softirq.h>
 
+#ifndef __ARCH_IRQ_STAT
 irq_cpustat_t irq_stat[NR_CPUS];
+#endif
 
 static softirq_handler softirq_handlers[NR_SOFTIRQS];
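The new #ifndef guard follows the familiar __ARCH_IRQ_STAT convention: common code supplies irq_stat only when no architecture header has claimed it first. An illustrative standalone sketch (NR_CPUS and irq_cpustat_t here are stand-ins, not the real Xen definitions):

```c
/* Sketch of the __ARCH_IRQ_STAT guard: the common fallback definition
 * of irq_stat is compiled only when no <asm/...> header defined
 * __ARCH_IRQ_STAT to claim it.  Types here are illustrative stubs. */
#include <stdio.h>

#define NR_CPUS 4
typedef struct { unsigned int softirq_pending; } irq_cpustat_t;

/* An architecture wanting its own layout would define __ARCH_IRQ_STAT
 * (and its own irq_stat) in its asm headers before this point. */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS];   /* common fallback definition */
#endif

int main(void)
{
    irq_stat[0].softirq_pending = 1;
    printf("cpu0 pending=%u\n", irq_stat[0].softirq_pending);
    return 0;
}
```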
--- a/xen/drivers/char/console.c        Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/drivers/char/console.c        Fri Jan 21 18:43:08 2005 +0000
@@ -18,6 +18,7 @@
 #include <xen/serial.h>
 #include <xen/keyhandler.h>
 #include <asm/uaccess.h>
+#include <asm/mm.h>
 
 /* opt_console: comma-separated list of console outputs. */
 static unsigned char opt_console[30] = "com1,vga";
@@ -31,7 +32,7 @@ static unsigned char opt_conswitch[5] =
 string_param("conswitch", opt_conswitch);
 
 static int xpos, ypos;
-static unsigned char *video = __va(0xB8000);
+static unsigned char *video;
 
 #define CONSOLE_RING_SIZE 16392
 typedef struct console_ring_st
@@ -137,6 +138,8 @@ static void init_vga(void)
         return;
     }
 
+    video = __va(0xB8000);
+
     tmp = inb(0x3da);
     outb(0x00, 0x3c0);
--- a/xen/drivers/char/serial.c Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/drivers/char/serial.c Fri Jan 21 18:43:08 2005 +0000
@@ -100,6 +100,16 @@ static uart_t com[2] = {
 #define UART_ENABLED(_u) ((_u)->baud != 0)
 #define DISABLE_UART(_u) ((_u)->baud = 0)
 
+#ifdef CONFIG_X86
+static inline int arch_serial_putc(uart_t *uart, unsigned char c)
+{
+    int space;
+    if ( (space = (inb(uart->io_base + LSR) & LSR_THRE)) )
+        outb(c, uart->io_base + THR);
+    return space;
+}
+#endif
+
 
 /***********************
  * PRIVATE FUNCTIONS
@@ -151,8 +161,7 @@ static inline void __serial_putc(uart_t
 
     do {
         spin_lock_irqsave(&uart->lock, flags);
-        if ( (space = (inb(uart->io_base + LSR) & LSR_THRE)) )
-            outb(c, uart->io_base + THR);
+        space = arch_serial_putc(uart, c);
         spin_unlock_irqrestore(&uart->lock, flags);
    }
    while ( !space );
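The transmit loop's hardware poke is hoisted into arch_serial_putc() so non-x86 ports can substitute their own register access while the common retry and locking policy stays put. A hedged standalone sketch, with the UART simulated by a busy counter and the spinlock stubbed out:

```c
/* Sketch of the hook extracted in serial.c: the common transmit loop
 * keeps the retry/locking policy, while arch_serial_putc() owns the
 * actual hardware poke.  The "UART" is simulated with a countdown;
 * inb/outb and the spinlock are stubbed out. */
#include <stdio.h>

typedef struct { int busy_countdown; } uart_t;

/* Arch side: returns nonzero if there was space and the byte was sent. */
static int arch_serial_putc(uart_t *uart, unsigned char c)
{
    if ( uart->busy_countdown > 0 )
    {
        uart->busy_countdown--;      /* transmitter still busy */
        return 0;
    }
    putchar(c);                      /* outb(c, uart->io_base + THR) in Xen */
    return 1;
}

/* Common side: spin until the arch hook reports the byte went out. */
static void __serial_putc(uart_t *uart, unsigned char c)
{
    int space;
    do {
        /* spin_lock_irqsave(&uart->lock, flags) would bracket this */
        space = arch_serial_putc(uart, c);
    } while ( !space );
}

int main(void)
{
    uart_t com1 = { .busy_countdown = 3 };
    __serial_putc(&com1, 'X');
    putchar('\n');
    return 0;
}
```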
--- a/xen/include/asm-x86/mm.h  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/asm-x86/mm.h  Fri Jan 21 18:43:08 2005 +0000
@@ -224,6 +224,8 @@ extern unsigned long *machine_to_phys_ma
 extern unsigned long m2p_start_mfn;
 #endif
 
+#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
+
 #define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
 #define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table)
 
@@ -285,10 +287,11 @@ do {
         ptwr_flush(PTWR_PT_INACTIVE);                                     \
     } while ( 0 )
 
-#define cleanup_writable_pagetable(_d, _w)                                 \
+#define cleanup_writable_pagetable(_d)                                     \
     do {                                                                   \
         if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) )  \
-            __cleanup_writable_pagetable(_w);                              \
+            __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE |             \
+                                         PTWR_CLEANUP_INACTIVE);           \
     } while ( 0 )
 
 #ifndef NDEBUG
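cleanup_writable_pagetable() loses its flags argument here: the macro now supplies PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE itself, so call sites (memory.c, schedule.c above) cannot disagree about which state to flush. A compilable sketch of the narrowed interface; the flag values and the VM_ASSIST test are illustrative stand-ins:

```c
/* Hedged sketch of the interface change to cleanup_writable_pagetable():
 * callers previously passed the PTWR flags at every call site; the macro
 * now fixes both flags internally.  Values and the vm_assist test are
 * illustrative, not Xen's real definitions. */
#include <stdio.h>

#define PTWR_CLEANUP_ACTIVE   (1 << 0)   /* illustrative values */
#define PTWR_CLEANUP_INACTIVE (1 << 1)

struct domain { unsigned long vm_assist; };
#define VMASST_TYPE_writable_pagetables 2
#define VM_ASSIST(_d, _t) ((_d)->vm_assist & (1UL << (_t)))

static void __cleanup_writable_pagetable(int which)
{
    printf("flushing ptwr state 0x%x\n", which);
}

/* New form: one argument; the flags live inside the macro. */
#define cleanup_writable_pagetable(_d)                                    \
    do {                                                                  \
        if ( VM_ASSIST((_d), VMASST_TYPE_writable_pagetables) )           \
            __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE |            \
                                         PTWR_CLEANUP_INACTIVE);          \
    } while ( 0 )

int main(void)
{
    struct domain d = { .vm_assist = 1UL << VMASST_TYPE_writable_pagetables };
    cleanup_writable_pagetable(&d);   /* no flags at the call site */
    return 0;
}
```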
--- a/xen/include/public/arch-x86_32.h  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/public/arch-x86_32.h  Fri Jan 21 18:43:08 2005 +0000
@@ -9,6 +9,11 @@
 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__
 #define __XEN_PUBLIC_ARCH_X86_32_H__
 
+#ifndef PACKED
+/* GCC-specific way to pack structure definitions (no implicit padding). */
+#define PACKED __attribute__ ((packed))
+#endif
+
 /*
  * Pointers and other address fields inside interface structures are padded to
  * 64 bits. This means that field alignments aren't different between 32- and
--- a/xen/include/public/arch-x86_64.h  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/public/arch-x86_64.h  Fri Jan 21 18:43:08 2005 +0000
@@ -9,6 +9,11 @@
 #ifndef __XEN_PUBLIC_ARCH_X86_64_H__
 #define __XEN_PUBLIC_ARCH_X86_64_H__
 
+#ifndef PACKED
+/* GCC-specific way to pack structure definitions (no implicit padding). */
+#define PACKED __attribute__ ((packed))
+#endif
+
 /* Pointers are naturally 64 bits in this architecture; no padding needed. */
 #define _MEMORY_PADDING(_X)
 #define MEMORY_PADDING
--- a/xen/include/public/xen.h  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/public/xen.h  Fri Jan 21 18:43:08 2005 +0000
@@ -9,11 +9,6 @@
 #ifndef __XEN_PUBLIC_XEN_H__
 #define __XEN_PUBLIC_XEN_H__
 
-#ifndef PACKED
-/* GCC-specific way to pack structure definitions (no implicit padding). */
-#define PACKED __attribute__ ((packed))
-#endif
-
 #if defined(__i386__)
 #include "arch-x86_32.h"
 #elif defined(__x86_64__)
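The PACKED definition moves from the common public header into the per-arch headers, since packing is an ABI question each architecture answers for itself. A small demonstration of what the macro buys; the struct below is an example, not one of Xen's interface structures:

```c
/* Demonstration of why the public headers define PACKED: with
 * __attribute__((packed)) GCC inserts no padding, so the structure
 * layout is the same for every guest compiler that honours it. */
#include <stdio.h>

#ifndef PACKED
/* GCC-specific way to pack structure definitions (no implicit padding). */
#define PACKED __attribute__ ((packed))
#endif

struct padded   { unsigned char tag; unsigned long addr; };
struct unpadded { unsigned char tag; unsigned long addr; } PACKED;

int main(void)
{
    /* Typically 16 vs 9 on LP64: packing removes 7 bytes of padding. */
    printf("default: %zu bytes, PACKED: %zu bytes\n",
           sizeof(struct padded), sizeof(struct unpadded));
    return 0;
}
```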
--- a/xen/include/xen/domain.h  Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/xen/domain.h  Fri Jan 21 18:43:08 2005 +0000
@@ -2,12 +2,16 @@
 #ifndef __XEN_DOMAIN_H__
 #define __XEN_DOMAIN_H__
 
-extern void domain_startofday(void);
-
 /*
  * Arch-specifics.
  */
 
+extern void domain_startofday(void);
+
+extern struct domain *arch_alloc_domain_struct(void);
+
+extern void arch_free_domain_struct(struct domain *d);
+
 extern void arch_do_createdomain(struct domain *d);
 
 extern int arch_final_setup_guestos(
@@ -17,4 +21,6 @@ extern void free_perdomain_pt(struct dom
 
 extern void domain_relinquish_memory(struct domain *d);
 
+extern void dump_pageframe_info(struct domain *d);
+
 #endif /* __XEN_DOMAIN_H__ */
--- a/xen/include/xen/grant_table.h     Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/xen/grant_table.h     Fri Jan 21 18:43:08 2005 +0000
@@ -25,7 +25,6 @@
 #define __XEN_GRANT_H__
 
 #include <xen/config.h>
-#include <xen/mm.h>
 #include <public/grant_table.h>
 
 /* Active grant entry - used for shadowing GTF_permit_access grants. */
--- a/xen/include/xen/slab.h    Fri Jan 21 02:08:45 2005 +0000
+++ b/xen/include/xen/slab.h    Fri Jan 21 18:43:08 2005 +0000
@@ -6,6 +6,14 @@
 #ifndef __SLAB_H__
 #define __SLAB_H__
 
+#include <xen/config.h>
+
+#ifdef __ARCH_HAS_SLAB_ALLOCATOR
+
+#include <asm/slab.h>
+
+#else
+
 typedef struct xmem_cache_s xmem_cache_t;
 
 #include <xen/mm.h>
@@ -44,4 +52,6 @@ extern int xmem_cache_reap(void);
 
 extern void dump_slabinfo();
 
+#endif /* __ARCH_HAS_SLAB_ALLOCATOR */
+
 #endif /* __SLAB_H__ */