direct-io.hg
changeset 1590:1b2e743614ea
bitkeeper revision 1.1020 (40debbf6f8HvBW3MU_9c6ljHeGaKeQ)
cleanups.
line diff
1.1 --- a/xen/arch/x86/Makefile Sun Jun 27 08:32:57 2004 +0000 1.2 +++ b/xen/arch/x86/Makefile Sun Jun 27 12:22:14 2004 +0000 1.3 @@ -6,18 +6,20 @@ OBJS := $(subst pdb-linux.o,,$(OBJS)) 1.4 OBJS := $(subst pdb-stub.o,,$(OBJS)) 1.5 endif 1.6 1.7 -# What happens here? We link monitor object files together, starting 1.8 -# at MONITOR_BASE (a very high address). But bootloader cannot put 1.9 -# things there, so we initially load at LOAD_BASE. A hacky little 1.10 -# tool called `elf-reloc' is used to modify segment offsets from 1.11 -# MONITOR_BASE-relative to LOAD_BASE-relative. 1.12 +LINK_BASE := 0xFC500000 # Xen is linked here 1.13 +LOAD_BASE := 0x00100000 # Xen is loaded here 1.14 + 1.15 +# What happens here? We link object files together, starting at LINK_BASE 1.16 +# (a very high address). But the bootloader cannot put things there, so we 1.17 +# initially load at LOAD_BASE. A tool called `elf-reloc' is used to modify 1.18 +# segment offsets from LINK_BASE-relative to LOAD_BASE-relative. 1.19 # (NB. Linux gets round this by turning its image into raw binary, then 1.20 # wrapping that with a low-memory bootstrapper.) 1.21 default: boot/boot.o $(OBJS) 1.22 $(LD) -r -o arch.o $(OBJS) 1.23 $(LD) $(LDFLAGS) boot/boot.o $(ALL_OBJS) -o $(TARGET)-syms 1.24 objcopy -R .note -R .comment -S $(TARGET)-syms $(TARGET) 1.25 - $(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET) 1.26 + $(BASEDIR)/tools/elf-reloc $(LINK_BASE) $(LOAD_BASE) $(TARGET) 1.27 1.28 clean: 1.29 rm -f *.o *~ core boot/*.o boot/*~ boot/core
2.1 --- a/xen/arch/x86/Rules.mk Sun Jun 27 08:32:57 2004 +0000 2.2 +++ b/xen/arch/x86/Rules.mk Sun Jun 27 12:22:14 2004 +0000 2.3 @@ -4,16 +4,9 @@ 2.4 CC := gcc 2.5 LD := ld 2.6 2.7 -# Linker should relocate monitor to this address 2.8 -MONITOR_BASE := 0xFC500000 2.9 - 2.10 -# Bootloader should load monitor to this real address 2.11 -LOAD_BASE := 0x00100000 2.12 - 2.13 CFLAGS := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing -O3 2.14 -CFLAGS += -iwithprefix include -Wall -Werror -DMONITOR_BASE=$(MONITOR_BASE) 2.15 -CFLAGS += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__ 2.16 -CFLAGS += -Wno-pointer-arith -Wredundant-decls 2.17 +CFLAGS += -iwithprefix include -Wall -Werror -fomit-frame-pointer 2.18 +CFLAGS += -I$(BASEDIR)/include -Wno-pointer-arith -Wredundant-decls 2.19 2.20 LDFLAGS := -T xen.lds -N 2.21
3.1 --- a/xen/arch/x86/entry.S Sun Jun 27 08:32:57 2004 +0000 3.2 +++ b/xen/arch/x86/entry.S Sun Jun 27 12:22:14 2004 +0000 3.3 @@ -6,8 +6,8 @@ 3.4 */ 3.5 3.6 /* 3.7 - * The idea for callbacks from monitor -> guest OS 3.8 - * =============================================== 3.9 + * The idea for callbacks to guest OSes 3.10 + * ==================================== 3.11 * 3.12 * First, we require that all callbacks (either via a supplied 3.13 * interrupt-descriptor-table, or via the special event or failsafe callbacks 3.14 @@ -23,7 +23,7 @@ 3.15 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather 3.16 * than the correct ring) and bad things are bound to ensue -- IRET is 3.17 * likely to fault, and we may end up killing the domain (no harm can 3.18 - * come to the hypervisor itself, though). 3.19 + * come to Xen, though). 3.20 * 3.21 * When doing a callback, we check if the return CS is in ring 0. If so, 3.22 * callback is delayed until next return to ring != 0.
4.1 --- a/xen/arch/x86/mm.c Sun Jun 27 08:32:57 2004 +0000 4.2 +++ b/xen/arch/x86/mm.c Sun Jun 27 12:22:14 2004 +0000 4.3 @@ -347,7 +347,7 @@ void *memguard_init(void *heap_start) 4.4 PAGE_MASK); 4.5 4.6 /* Memory guarding is incompatible with super pages. */ 4.7 - for ( i = 0; i < (MAX_MONITOR_ADDRESS >> L2_PAGETABLE_SHIFT); i++ ) 4.8 + for ( i = 0; i < (MAX_XENHEAP_ADDRESS >> L2_PAGETABLE_SHIFT); i++ ) 4.9 { 4.10 l1 = (l1_pgentry_t *)heap_start; 4.11 heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
5.1 --- a/xen/arch/x86/trampoline.S Sun Jun 27 08:32:57 2004 +0000 5.2 +++ b/xen/arch/x86/trampoline.S Sun Jun 27 12:22:14 2004 +0000 5.3 @@ -43,7 +43,7 @@ r_base = . 5.4 lmsw %ax # into protected mode 5.5 jmp flush_instr 5.6 flush_instr: 5.7 - ljmpl $__HYPERVISOR_CS, $(MONITOR_BASE)-__PAGE_OFFSET 5.8 + ljmpl $__HYPERVISOR_CS, $0x100000 # 1MB 5.9 5.10 idt_48: 5.11 .word 0 # idt limit = 0
6.1 --- a/xen/arch/x86/traps.c Sun Jun 27 08:32:57 2004 +0000 6.2 +++ b/xen/arch/x86/traps.c Sun Jun 27 12:22:14 2004 +0000 6.3 @@ -171,14 +171,14 @@ void show_registers(struct pt_regs *regs 6.4 6.5 spinlock_t die_lock = SPIN_LOCK_UNLOCKED; 6.6 6.7 -void die(const char * str, struct pt_regs * regs, long err) 6.8 +void die(const char *str, struct pt_regs * regs, long err) 6.9 { 6.10 unsigned long flags; 6.11 spin_lock_irqsave(&die_lock, flags); 6.12 printk("%s: %04lx,%04lx\n", str, err >> 16, err & 0xffff); 6.13 show_registers(regs); 6.14 spin_unlock_irqrestore(&die_lock, flags); 6.15 - panic("HYPERVISOR DEATH!!\n"); 6.16 + panic("Fatal crash within Xen.\n"); 6.17 } 6.18 6.19 6.20 @@ -192,7 +192,7 @@ static inline void do_trap(int trapnr, c 6.21 unsigned long fixup; 6.22 6.23 if (!(regs->xcs & 3)) 6.24 - goto fault_in_hypervisor; 6.25 + goto xen_fault; 6.26 6.27 ti = current->thread.traps + trapnr; 6.28 gtb->flags = use_error_code ? GTBF_TRAP : GTBF_TRAP_NOCODE; 6.29 @@ -203,7 +203,7 @@ static inline void do_trap(int trapnr, c 6.30 p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1; 6.31 return; 6.32 6.33 - fault_in_hypervisor: 6.34 + xen_fault: 6.35 6.36 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 6.37 { 6.38 @@ -338,7 +338,7 @@ asmlinkage void do_page_fault(struct pt_ 6.39 return; /* Returns TRUE if fault was handled. */ 6.40 6.41 if ( unlikely(!(regs->xcs & 3)) ) 6.42 - goto fault_in_hypervisor; 6.43 + goto xen_fault; 6.44 6.45 ti = p->thread.traps + 14; 6.46 gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */ 6.47 @@ -350,7 +350,7 @@ asmlinkage void do_page_fault(struct pt_ 6.48 p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1; 6.49 return; 6.50 6.51 - fault_in_hypervisor: 6.52 + xen_fault: 6.53 6.54 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 6.55 { 6.56 @@ -589,11 +589,10 @@ asmlinkage void do_debug(struct pt_regs 6.57 /* Clear TF just for absolute sanity. 
*/ 6.58 regs->eflags &= ~EF_TF; 6.59 /* 6.60 - * Basically, we ignore watchpoints when they trigger in 6.61 - * the hypervisor. This may happen when a buffer is passed 6.62 - * to us which previously had a watchpoint set on it. 6.63 - * No need to bump EIP; the only faulting trap is an 6.64 - * instruction breakpoint, which can't happen to us. 6.65 + * We ignore watchpoints when they trigger within Xen. This may happen 6.66 + * when a buffer is passed to us which previously had a watchpoint set 6.67 + * on it. No need to bump EIP; the only faulting trap is an instruction 6.68 + * breakpoint, which can't happen to us. 6.69 */ 6.70 return; 6.71 } 6.72 @@ -717,7 +716,7 @@ void __init trap_init(void) 6.73 set_intr_gate(18,&machine_check); 6.74 set_intr_gate(19,&simd_coprocessor_error); 6.75 6.76 - /* Only ring 1 can access monitor services. */ 6.77 + /* Only ring 1 can access Xen services. */ 6.78 _set_gate(idt_table+HYPERCALL_VECTOR,14,1,&hypercall); 6.79 6.80 /* CPU0 uses the master IDT. */
7.1 --- a/xen/common/domain.c Sun Jun 27 08:32:57 2004 +0000 7.2 +++ b/xen/common/domain.c Sun Jun 27 12:22:14 2004 +0000 7.3 @@ -48,124 +48,123 @@ struct domain *task_list; 7.4 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu) 7.5 { 7.6 char buf[100]; 7.7 - struct domain *p, **pp; 7.8 + struct domain *d, **pd; 7.9 unsigned long flags; 7.10 7.11 - if ( (p = alloc_domain_struct()) == NULL ) 7.12 + if ( (d = alloc_domain_struct()) == NULL ) 7.13 return NULL; 7.14 7.15 - atomic_set(&p->refcnt, 1); 7.16 - atomic_set(&p->pausecnt, 0); 7.17 + atomic_set(&d->refcnt, 1); 7.18 + atomic_set(&d->pausecnt, 0); 7.19 7.20 - spin_lock_init(&p->mm.shadow_lock); 7.21 + spin_lock_init(&d->mm.shadow_lock); 7.22 7.23 - p->domain = dom_id; 7.24 - p->processor = cpu; 7.25 - p->create_time = NOW(); 7.26 + d->domain = dom_id; 7.27 + d->processor = cpu; 7.28 + d->create_time = NOW(); 7.29 7.30 - memcpy(&p->thread, &idle0_task.thread, sizeof(p->thread)); 7.31 + memcpy(&d->thread, &idle0_task.thread, sizeof(d->thread)); 7.32 7.33 - if ( p->domain != IDLE_DOMAIN_ID ) 7.34 + if ( d->domain != IDLE_DOMAIN_ID ) 7.35 { 7.36 - if ( init_event_channels(p) != 0 ) 7.37 + if ( init_event_channels(d) != 0 ) 7.38 { 7.39 - free_domain_struct(p); 7.40 + free_domain_struct(d); 7.41 return NULL; 7.42 } 7.43 7.44 /* We use a large intermediate to avoid overflow in sprintf. 
*/ 7.45 sprintf(buf, "Domain-%u", dom_id); 7.46 - strncpy(p->name, buf, MAX_DOMAIN_NAME); 7.47 - p->name[MAX_DOMAIN_NAME-1] = '\0'; 7.48 + strncpy(d->name, buf, MAX_DOMAIN_NAME); 7.49 + d->name[MAX_DOMAIN_NAME-1] = '\0'; 7.50 7.51 - p->addr_limit = USER_DS; 7.52 + d->addr_limit = USER_DS; 7.53 7.54 - spin_lock_init(&p->page_list_lock); 7.55 - INIT_LIST_HEAD(&p->page_list); 7.56 - p->max_pages = p->tot_pages = 0; 7.57 + spin_lock_init(&d->page_list_lock); 7.58 + INIT_LIST_HEAD(&d->page_list); 7.59 + d->max_pages = d->tot_pages = 0; 7.60 7.61 - p->shared_info = (void *)get_free_page(); 7.62 - memset(p->shared_info, 0, PAGE_SIZE); 7.63 - SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p); 7.64 - machine_to_phys_mapping[virt_to_phys(p->shared_info) >> 7.65 + d->shared_info = (void *)get_free_page(); 7.66 + memset(d->shared_info, 0, PAGE_SIZE); 7.67 + SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d); 7.68 + machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 7.69 PAGE_SHIFT] = 0x80000000UL; /* debug */ 7.70 7.71 - p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(); 7.72 - memset(p->mm.perdomain_pt, 0, PAGE_SIZE); 7.73 - machine_to_phys_mapping[virt_to_phys(p->mm.perdomain_pt) >> 7.74 + d->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(); 7.75 + memset(d->mm.perdomain_pt, 0, PAGE_SIZE); 7.76 + machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >> 7.77 PAGE_SHIFT] = 0x0fffdeadUL; /* debug */ 7.78 7.79 /* Per-domain PCI-device list. */ 7.80 - spin_lock_init(&p->pcidev_lock); 7.81 - INIT_LIST_HEAD(&p->pcidev_list); 7.82 + spin_lock_init(&d->pcidev_lock); 7.83 + INIT_LIST_HEAD(&d->pcidev_list); 7.84 7.85 - sched_add_domain(p); 7.86 + sched_add_domain(d); 7.87 7.88 write_lock_irqsave(&tasklist_lock, flags); 7.89 - pp = &task_list; /* NB. task_list is maintained in order of dom_id. */ 7.90 - for ( pp = &task_list; *pp != NULL; pp = &(*pp)->next_list ) 7.91 - if ( (*pp)->domain > p->domain ) 7.92 + pd = &task_list; /* NB. 
task_list is maintained in order of dom_id. */ 7.93 + for ( pd = &task_list; *pd != NULL; pd = &(*pd)->next_list ) 7.94 + if ( (*pd)->domain > d->domain ) 7.95 break; 7.96 - p->next_list = *pp; 7.97 - *pp = p; 7.98 - p->next_hash = task_hash[TASK_HASH(dom_id)]; 7.99 - task_hash[TASK_HASH(dom_id)] = p; 7.100 + d->next_list = *pd; 7.101 + *pd = d; 7.102 + d->next_hash = task_hash[TASK_HASH(dom_id)]; 7.103 + task_hash[TASK_HASH(dom_id)] = d; 7.104 write_unlock_irqrestore(&tasklist_lock, flags); 7.105 } 7.106 else 7.107 { 7.108 - sprintf(p->name, "Idle-%d", cpu); 7.109 - sched_add_domain(p); 7.110 + sprintf(d->name, "Idle-%d", cpu); 7.111 + sched_add_domain(d); 7.112 } 7.113 7.114 - 7.115 - return p; 7.116 + return d; 7.117 } 7.118 7.119 7.120 struct domain *find_domain_by_id(domid_t dom) 7.121 { 7.122 - struct domain *p; 7.123 + struct domain *d; 7.124 unsigned long flags; 7.125 7.126 read_lock_irqsave(&tasklist_lock, flags); 7.127 - p = task_hash[TASK_HASH(dom)]; 7.128 - while ( p != NULL ) 7.129 + d = task_hash[TASK_HASH(dom)]; 7.130 + while ( d != NULL ) 7.131 { 7.132 - if ( p->domain == dom ) 7.133 + if ( d->domain == dom ) 7.134 { 7.135 - if ( unlikely(!get_domain(p)) ) 7.136 - p = NULL; 7.137 + if ( unlikely(!get_domain(d)) ) 7.138 + d = NULL; 7.139 break; 7.140 } 7.141 - p = p->next_hash; 7.142 + d = d->next_hash; 7.143 } 7.144 read_unlock_irqrestore(&tasklist_lock, flags); 7.145 7.146 - return p; 7.147 + return d; 7.148 } 7.149 7.150 7.151 -/* return the most recent domain created */ 7.152 +/* Return the most recently created domain. 
*/ 7.153 struct domain *find_last_domain(void) 7.154 { 7.155 - struct domain *p, *plast; 7.156 + struct domain *d, *dlast; 7.157 unsigned long flags; 7.158 7.159 read_lock_irqsave(&tasklist_lock, flags); 7.160 - plast = task_list; 7.161 - p = plast->next_list; 7.162 - while ( p != NULL ) 7.163 + dlast = task_list; 7.164 + d = dlast->next_list; 7.165 + while ( d != NULL ) 7.166 { 7.167 - if ( p->create_time > plast->create_time ) 7.168 - plast = p; 7.169 - p = p->next_list; 7.170 + if ( d->create_time > dlast->create_time ) 7.171 + dlast = d; 7.172 + d = d->next_list; 7.173 } 7.174 - if ( !get_domain(plast) ) 7.175 - plast = NULL; 7.176 + if ( !get_domain(dlast) ) 7.177 + dlast = NULL; 7.178 read_unlock_irqrestore(&tasklist_lock, flags); 7.179 7.180 - return plast; 7.181 + return dlast; 7.182 } 7.183 7.184 7.185 @@ -288,50 +287,31 @@ struct pfn_info *alloc_domain_page(struc 7.186 7.187 void free_domain_page(struct pfn_info *page) 7.188 { 7.189 - unsigned long flags; 7.190 + unsigned long flags; 7.191 + int drop_dom_ref; 7.192 struct domain *d = page->u.domain; 7.193 7.194 - ASSERT(!in_irq()); 7.195 + /* Deallocation of such pages is handled out of band. */ 7.196 + if ( unlikely(IS_XEN_HEAP_FRAME(page)) ) 7.197 + return; 7.198 + 7.199 + page->tlbflush_timestamp = tlbflush_clock; 7.200 + page->u.cpu_mask = 1 << d->processor; 7.201 7.202 - if ( likely(!IS_XEN_HEAP_FRAME(page)) ) 7.203 - { 7.204 - page->u.cpu_mask = 0; 7.205 - page->tlbflush_timestamp = tlbflush_clock; 7.206 - if ( likely(d != NULL) ) 7.207 - { 7.208 - page->u.cpu_mask = 1 << d->processor; 7.209 - /* NB. May recursively lock from domain_relinquish_memory(). */ 7.210 - spin_lock_recursive(&d->page_list_lock); 7.211 - list_del(&page->list); 7.212 - if ( unlikely(--d->tot_pages == 0) ) 7.213 - { 7.214 - spin_unlock_recursive(&d->page_list_lock); 7.215 - put_domain(d); /* Domain 'd' can disappear now. 
*/ 7.216 - } 7.217 - else 7.218 - { 7.219 - spin_unlock_recursive(&d->page_list_lock); 7.220 - } 7.221 - } 7.222 + /* NB. May recursively lock from domain_relinquish_memory(). */ 7.223 + spin_lock_recursive(&d->page_list_lock); 7.224 + list_del(&page->list); 7.225 + drop_dom_ref = (--d->tot_pages == 0); 7.226 + spin_unlock_recursive(&d->page_list_lock); 7.227 + if ( drop_dom_ref ) 7.228 + put_domain(d); 7.229 7.230 - page->count_and_flags = 0; 7.231 - 7.232 - spin_lock_irqsave(&free_list_lock, flags); 7.233 - list_add(&page->list, &free_list); 7.234 - free_pfns++; 7.235 - spin_unlock_irqrestore(&free_list_lock, flags); 7.236 - } 7.237 - else 7.238 - { 7.239 - /* 7.240 - * No need for a TLB flush. Non-domain pages are always co-held by Xen, 7.241 - * and the Xen reference is not dropped until the domain is dead. 7.242 - * DOM0 may hold references, but it's trusted so no need to flush. 7.243 - */ 7.244 - page->u.cpu_mask = 0; 7.245 - page->count_and_flags = 0; 7.246 - free_page((unsigned long)page_to_virt(page)); 7.247 - } 7.248 + page->count_and_flags = 0; 7.249 + 7.250 + spin_lock_irqsave(&free_list_lock, flags); 7.251 + list_add(&page->list, &free_list); 7.252 + free_pfns++; 7.253 + spin_unlock_irqrestore(&free_list_lock, flags); 7.254 } 7.255 7.256 7.257 @@ -457,7 +437,7 @@ void domain_destruct(struct domain *d) 7.258 destroy_event_channels(d); 7.259 7.260 free_page((unsigned long)d->mm.perdomain_pt); 7.261 - UNSHARE_PFN(virt_to_page(d->shared_info)); 7.262 + free_page((unsigned long)d->shared_info); 7.263 7.264 free_domain_struct(d); 7.265 }
8.1 --- a/xen/common/kernel.c Sun Jun 27 08:32:57 2004 +0000 8.2 +++ b/xen/common/kernel.c Sun Jun 27 12:22:14 2004 +0000 8.3 @@ -217,15 +217,15 @@ void cmain(unsigned long magic, multiboo 8.4 heap_start = memguard_init(&_end); 8.5 8.6 printk("Xen heap size is %luKB\n", 8.7 - (MAX_MONITOR_ADDRESS-__pa(heap_start))/1024 ); 8.8 + (MAX_XENHEAP_ADDRESS-__pa(heap_start))/1024 ); 8.9 8.10 - if ( ((MAX_MONITOR_ADDRESS-__pa(heap_start))/1024) <= 4096 ) 8.11 + if ( ((MAX_XENHEAP_ADDRESS-__pa(heap_start))/1024) <= 4096 ) 8.12 { 8.13 printk("Xen heap size is too small to safely continue!\n"); 8.14 for ( ; ; ) ; 8.15 } 8.16 8.17 - init_page_allocator(__pa(heap_start), MAX_MONITOR_ADDRESS); 8.18 + init_page_allocator(__pa(heap_start), MAX_XENHEAP_ADDRESS); 8.19 8.20 /* Initialise the slab allocator. */ 8.21 kmem_cache_init();
9.1 --- a/xen/common/memory.c Sun Jun 27 08:32:57 2004 +0000 9.2 +++ b/xen/common/memory.c Sun Jun 27 12:22:14 2004 +0000 9.3 @@ -32,8 +32,8 @@ 9.4 * TOT_COUNT is the obvious reference count. It counts all uses of a 9.5 * physical page frame by a domain, including uses as a page directory, 9.6 * a page table, or simple mappings via a PTE. This count prevents a 9.7 - * domain from releasing a frame back to the hypervisor's free pool when 9.8 - * it still holds a reference to it. 9.9 + * domain from releasing a frame back to the free pool when it still holds 9.10 + * a reference to it. 9.11 * 9.12 * TYPE_COUNT is more subtle. A frame can be put to one of three 9.13 * mutually-exclusive uses: it might be used as a page directory, or a 9.14 @@ -83,48 +83,6 @@ 9.15 * an application-supplied buffer). 9.16 */ 9.17 9.18 - 9.19 -/* 9.20 - * THE FOLLOWING ARE ISSUES IF GUEST OPERATING SYSTEMS BECOME SMP-CAPABLE. 9.21 - * ----------------------------------------------------------------------- 9.22 - * 9.23 - * ********* 9.24 - * UPDATE 15/7/02: Interface has changed --updates now specify physical 9.25 - * address of page-table entry, rather than specifying a virtual address, 9.26 - * so hypervisor no longer "walks" the page tables. Therefore the 9.27 - * solution below cannot work. Another possibility is to add a new entry 9.28 - * to our "struct page" which says to which top-level page table each 9.29 - * lower-level page table or writeable mapping belongs. If it belongs to more 9.30 - * than one, we'd probably just flush on all processors running the domain. 9.31 - * ********* 9.32 - * 9.33 - * The problem involves creating new page tables which might be mapped 9.34 - * writeable in the TLB of another processor. As an example, a domain might be 9.35 - * running in two contexts (ie. on two processors) simultaneously, using the 9.36 - * same top-level page table in both contexts. 
Now, if context 1 sends an 9.37 - * update request [make page P read-only, add a reference to page P as a page 9.38 - * table], that will succeed if there was only one writeable mapping of P. 9.39 - * However, that mapping may persist in the TLB of context 2. 9.40 - * 9.41 - * Solution: when installing a new page table, we must flush foreign TLBs as 9.42 - * necessary. Naive solution is to flush on any processor running our domain. 9.43 - * Cleverer solution is to flush on any processor running same top-level page 9.44 - * table, but this will sometimes fail (consider two different top-level page 9.45 - * tables which have a shared lower-level page table). 9.46 - * 9.47 - * A better solution: when squashing a write reference, check how many times 9.48 - * that lowest-level table entry is referenced by ORing refcounts of tables 9.49 - * down the page-table hierarchy. If results is != 1, we require flushing all 9.50 - * instances of current domain if a new table is installed (because the 9.51 - * lowest-level entry may be referenced by many top-level page tables). 9.52 - * However, common case will be that result == 1, so we only need to flush 9.53 - * processors with the same top-level page table. Make choice at 9.54 - * table-installation time based on a `flush_level' flag, which is 9.55 - * FLUSH_NONE, FLUSH_PAGETABLE, FLUSH_DOMAIN. A flush reduces this 9.56 - * to FLUSH_NONE, while squashed write mappings can only promote up 9.57 - * to more aggressive flush types. 9.58 - */ 9.59 - 9.60 #include <xen/config.h> 9.61 #include <xen/init.h> 9.62 #include <xen/lib.h> 9.63 @@ -186,11 +144,6 @@ static struct { 9.64 #define GPS (percpu_info[smp_processor_id()].gps ? : current) 9.65 9.66 9.67 -/* 9.68 - * init_frametable: 9.69 - * Initialise per-frame memory information. This goes directly after 9.70 - * MAX_MONITOR_ADDRESS in physical memory. 
9.71 - */ 9.72 void __init init_frametable(unsigned long nr_pages) 9.73 { 9.74 unsigned long mfn; 9.75 @@ -619,7 +572,7 @@ static int mod_l2_entry(l2_pgentry_t *pl 9.76 if ( unlikely((((unsigned long)pl2e & (PAGE_SIZE-1)) >> 2) >= 9.77 DOMAIN_ENTRIES_PER_L2_PAGETABLE) ) 9.78 { 9.79 - MEM_LOG("Illegal L2 update attempt in hypervisor area %p", pl2e); 9.80 + MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e); 9.81 return 0; 9.82 } 9.83
10.1 --- a/xen/include/acpi/platform/aclinux.h Sun Jun 27 08:32:57 2004 +0000 10.2 +++ b/xen/include/acpi/platform/aclinux.h Sun Jun 27 12:22:14 2004 +0000 10.3 @@ -49,7 +49,7 @@ 10.4 #define ACPI_USE_SYSTEM_CLIBRARY 10.5 #define ACPI_USE_DO_WHILE_0 10.6 10.7 -#ifdef __KERNEL__ 10.8 +#if 1 /*def __KERNEL__*/ 10.9 10.10 #include <xen/config.h> 10.11 #include <xen/string.h>
11.1 --- a/xen/include/asm-x86/acpi.h Sun Jun 27 08:32:57 2004 +0000 11.2 +++ b/xen/include/asm-x86/acpi.h Sun Jun 27 12:22:14 2004 +0000 11.3 @@ -26,8 +26,6 @@ 11.4 #ifndef _ASM_ACPI_H 11.5 #define _ASM_ACPI_H 11.6 11.7 -#ifdef __KERNEL__ 11.8 - 11.9 #define COMPILER_DEPENDENT_INT64 long long 11.10 #define COMPILER_DEPENDENT_UINT64 unsigned long long 11.11 11.12 @@ -164,7 +162,4 @@ extern void acpi_reserve_bootmem(void); 11.13 11.14 #endif /*CONFIG_ACPI_SLEEP*/ 11.15 11.16 - 11.17 -#endif /*__KERNEL__*/ 11.18 - 11.19 #endif /*_ASM_ACPI_H*/
12.1 --- a/xen/include/asm-x86/config.h Sun Jun 27 08:32:57 2004 +0000 12.2 +++ b/xen/include/asm-x86/config.h Sun Jun 27 12:22:14 2004 +0000 12.3 @@ -158,6 +158,10 @@ extern void __out_of_line_bug(int line) 12.4 12.5 #elif defined(__i386__) 12.6 12.7 +/* The following are machine addresses. */ 12.8 +#define MAX_XENHEAP_ADDRESS (12*1024*1024) 12.9 +#define MAX_DIRECTMAP_ADDRESS (40*1024*1024) 12.10 + 12.11 /* Hypervisor owns top 64MB of virtual address space. */ 12.12 #define HYPERVISOR_VIRT_START (0xFC000000UL) 12.13 12.14 @@ -167,18 +171,12 @@ extern void __out_of_line_bug(int line) 12.15 */ 12.16 #define RO_MPT_VIRT_START (HYPERVISOR_VIRT_START) 12.17 #define RO_MPT_VIRT_END (RO_MPT_VIRT_START + (4*1024*1024)) 12.18 -/* 12.19 - * Next 12MB is fixed monitor space, which is part of a 40MB direct-mapped 12.20 - * memory region. The following are machine addresses. 12.21 - */ 12.22 -#define MAX_MONITOR_ADDRESS (12*1024*1024) 12.23 -#define MAX_DIRECTMAP_ADDRESS (40*1024*1024) 12.24 -/* And the virtual addresses for the direct-map region... */ 12.25 +/* The virtual addresses for the 40MB direct-map region. */ 12.26 #define DIRECTMAP_VIRT_START (RO_MPT_VIRT_END) 12.27 #define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + MAX_DIRECTMAP_ADDRESS) 12.28 -#define MONITOR_VIRT_START (DIRECTMAP_VIRT_START) 12.29 -#define MONITOR_VIRT_END (MONITOR_VIRT_START + MAX_MONITOR_ADDRESS) 12.30 -#define RDWR_MPT_VIRT_START (MONITOR_VIRT_END) 12.31 +#define XENHEAP_VIRT_START (DIRECTMAP_VIRT_START) 12.32 +#define XENHEAP_VIRT_END (XENHEAP_VIRT_START + MAX_XENHEAP_ADDRESS) 12.33 +#define RDWR_MPT_VIRT_START (XENHEAP_VIRT_END) 12.34 #define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (4*1024*1024)) 12.35 #define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END) 12.36 #define FRAMETABLE_VIRT_END (DIRECTMAP_VIRT_END)
13.1 --- a/xen/include/asm-x86/string.h Sun Jun 27 08:32:57 2004 +0000 13.2 +++ b/xen/include/asm-x86/string.h Sun Jun 27 12:22:14 2004 +0000 13.3 @@ -1,20 +1,7 @@ 13.4 #ifndef _I386_STRING_H_ 13.5 #define _I386_STRING_H_ 13.6 13.7 -#ifdef __KERNEL__ 13.8 #include <xen/config.h> 13.9 -/* 13.10 - * On a 486 or Pentium, we are better off not using the 13.11 - * byte string operations. But on a 386 or a PPro the 13.12 - * byte string ops are faster than doing it by hand 13.13 - * (MUCH faster on a Pentium). 13.14 - * 13.15 - * Also, the byte strings actually work correctly. Forget 13.16 - * the i486 routines for now as they may be broken.. 13.17 - */ 13.18 -#if FIXED_486_STRING && defined(CONFIG_X86_USE_STRING_486) 13.19 -#include <asm/string-486.h> 13.20 -#else 13.21 13.22 /* 13.23 * This string-include defines all string functions as inline 13.24 @@ -495,7 +482,4 @@ static inline void * memscan(void * addr 13.25 return addr; 13.26 } 13.27 13.28 -#endif /* CONFIG_X86_USE_STRING_486 */ 13.29 -#endif /* __KERNEL__ */ 13.30 - 13.31 #endif
14.1 --- a/xen/include/asm-x86/x86_32/ptrace.h Sun Jun 27 08:32:57 2004 +0000 14.2 +++ b/xen/include/asm-x86/x86_32/ptrace.h Sun Jun 27 12:22:14 2004 +0000 14.3 @@ -44,8 +44,4 @@ enum EFLAGS { 14.4 EF_ID = 0x00200000, /* id */ 14.5 }; 14.6 14.7 -#ifdef __KERNEL__ 14.8 -#define user_mode(regs) ((3 & (regs)->xcs)) 14.9 #endif 14.10 - 14.11 -#endif
15.1 --- a/xen/include/asm-x86/x86_64/ptrace.h Sun Jun 27 08:32:57 2004 +0000 15.2 +++ b/xen/include/asm-x86/x86_64/ptrace.h Sun Jun 27 12:22:14 2004 +0000 15.3 @@ -81,8 +81,8 @@ struct pt_regs { 15.4 #define PTRACE_GETFPXREGS 18 15.5 #define PTRACE_SETFPXREGS 19 15.6 15.7 -#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 15.8 -#define user_mode(regs) (!!((regs)->cs & 3)) 15.9 +#if !defined(__ASSEMBLY__) 15.10 + 15.11 #define instruction_pointer(regs) ((regs)->rip) 15.12 extern void show_regs(struct pt_regs *); 15.13
16.1 --- a/xen/include/hypervisor-ifs/trace.h Sun Jun 27 08:32:57 2004 +0000 16.2 +++ b/xen/include/hypervisor-ifs/trace.h Sun Jun 27 12:22:14 2004 +0000 16.3 @@ -20,15 +20,12 @@ struct t_buf { 16.4 struct t_rec *data; /* pointer to data area. physical address 16.5 * for convenience in user space code */ 16.6 16.7 - unsigned long size; /* size of the data area, in t_recs */ 16.8 - unsigned long head; /* array index of the most recent record */ 16.9 + unsigned long size; /* size of the data area, in t_recs */ 16.10 + unsigned long head; /* array index of the most recent record */ 16.11 16.12 -#ifdef __KERNEL__ 16.13 + /* Kernel-private elements follow... */ 16.14 struct t_rec *head_ptr; /* pointer to the head record */ 16.15 struct t_rec *vdata; /* virtual address pointer to data */ 16.16 -#endif 16.17 - 16.18 - /* never add anything here - the kernel stuff must be the last elements */ 16.19 }; 16.20 16.21 #endif /* __HYPERVISOR_IFS_TRACE_H__ */
17.1 --- a/xen/include/xen/mm.h Sun Jun 27 08:32:57 2004 +0000 17.2 +++ b/xen/include/xen/mm.h Sun Jun 27 12:22:14 2004 +0000 17.3 @@ -86,18 +86,18 @@ struct pfn_info 17.4 #define PageSetSlab(page) ((void)0) 17.5 #define PageClearSlab(page) ((void)0) 17.6 17.7 -#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < MAX_MONITOR_ADDRESS) 17.8 +#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < MAX_XENHEAP_ADDRESS) 17.9 17.10 -#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) \ 17.11 - do { \ 17.12 - (_pfn)->u.domain = (_dom); \ 17.13 - wmb(); /* install valid domain ptr before updating refcnt. */ \ 17.14 - (_pfn)->count_and_flags = 1; /* Xen holds a writeable reference */ \ 17.15 - (_pfn)->type_and_flags = PGT_writeable_page | PGT_validated | 1; \ 17.16 +#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) \ 17.17 + do { \ 17.18 + (_pfn)->u.domain = (_dom); \ 17.19 + wmb(); /* install valid domain ptr before updating refcnt. */ \ 17.20 + /* _dom holds an allocation reference */ \ 17.21 + (_pfn)->count_and_flags = PGC_allocated | 1; \ 17.22 + /* The incremented type count is intended to pin to 'writeable'. */ \ 17.23 + (_pfn)->type_and_flags = PGT_writeable_page | PGT_validated | 1; \ 17.24 } while ( 0 ) 17.25 17.26 -#define UNSHARE_PFN(_pfn) put_page_and_type(_pfn) 17.27 - 17.28 extern struct pfn_info *frame_table; 17.29 extern unsigned long frame_table_size; 17.30 extern struct list_head free_list;
18.1 --- a/xen/include/xen/pci.h Sun Jun 27 08:32:57 2004 +0000 18.2 +++ b/xen/include/xen/pci.h Sun Jun 27 12:22:14 2004 +0000 18.3 @@ -326,8 +326,6 @@ 18.4 #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ 18.5 #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ 18.6 18.7 -#ifdef __KERNEL__ 18.8 - 18.9 #include <xen/types.h> 18.10 #include <xen/config.h> 18.11 #include <xen/ioport.h> 18.12 @@ -833,5 +831,4 @@ extern int pci_pci_problems; 18.13 #define PCIPCI_VSFX 16 18.14 #define PCIPCI_ALIMAGIK 32 18.15 18.16 -#endif /* __KERNEL__ */ 18.17 #endif /* LINUX_PCI_H */
19.1 --- a/xen/include/xen/string.h Sun Jun 27 08:32:57 2004 +0000 19.2 +++ b/xen/include/xen/string.h Sun Jun 27 12:22:14 2004 +0000 19.3 @@ -1,12 +1,7 @@ 19.4 #ifndef _LINUX_STRING_H_ 19.5 #define _LINUX_STRING_H_ 19.6 19.7 -/* We don't want strings.h stuff being user by user stuff by accident */ 19.8 - 19.9 -#ifdef __KERNEL__ 19.10 - 19.11 #include <xen/types.h> /* for size_t */ 19.12 -//#include <xen/stddef.h> /* for NULL */ 19.13 19.14 #ifdef __cplusplus 19.15 extern "C" { 19.16 @@ -86,5 +81,4 @@ extern void * memchr(const void *,int,__ 19.17 } 19.18 #endif 19.19 19.20 -#endif 19.21 #endif /* _LINUX_STRING_H_ */
20.1 --- a/xen/tools/elf-reloc.c Sun Jun 27 08:32:57 2004 +0000 20.2 +++ b/xen/tools/elf-reloc.c Sun Jun 27 12:22:14 2004 +0000 20.3 @@ -80,9 +80,6 @@ int main(int argc, char **argv) 20.4 new_base = strtoul(argv[2], NULL, 16); 20.5 image_name = argv[3]; 20.6 20.7 - printf("Relocating `%s' from 0x%08lX to 0x%08lX\n", 20.8 - image_name, old_base, new_base); 20.9 - 20.10 fp = fopen(image_name, "rb+"); 20.11 if ( !fp ) 20.12 {