ia64/xen-unstable
changeset 4706:65b28c74cec2
bitkeeper revision 1.1393 (4271e39733erltTpi7grGFwvE3eDmw)
Merge http://xen.bkbits.net:8080/xeno-unstable.bk
into gandalf.hpl.hp.com:/var/bk/xeno-unstable.bk
--- a/.rootkeys	Thu Apr 28 13:54:01 2005 +0000
+++ b/.rootkeys	Fri Apr 29 07:34:47 2005 +0000
@@ -152,6 +152,7 @@ 3e5a4e65RMGcuA-HCn3-wNx3fFQwdg linux-2.4
 4241709bNBs1q4Ss32YW0CyFVOGhEg linux-2.4.29-xen-sparse/arch/xen/kernel/ioport.c
 3e5a4e653U6cELGv528IxOLHvCq8iA linux-2.4.29-xen-sparse/arch/xen/kernel/irq.c
 3e5a4e65muT6SU3ck47IP87Q7Ti5hA linux-2.4.29-xen-sparse/arch/xen/kernel/ldt.c
+4270e964iKFC24KiVm6jC5Eo7MxV6w linux-2.4.29-xen-sparse/arch/xen/kernel/pci-dma.c
 4051db95N9N99FjsRwi49YKUNHWI8A linux-2.4.29-xen-sparse/arch/xen/kernel/pci-pc.c
 3e5a4e65IGt3WwQDNiL4h-gYWgNTWQ linux-2.4.29-xen-sparse/arch/xen/kernel/process.c
 3e5a4e66tR-qJMLj3MppcKqmvuI2XQ linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.4.29-xen-sparse/arch/xen/kernel/pci-dma.c	Fri Apr 29 07:34:47 2005 +0000
@@ -0,0 +1,137 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * On i386 there is no hardware dynamic DMA address translation,
+ * so consistent alloc/free are merely page allocation/freeing.
+ * The rest of the dynamic DMA mapping interface is implemented
+ * in asm/pci.h.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm-xen/balloon.h>
+
+#define pte_offset_kernel pte_offset
+
+struct dma_coherent_mem {
+        void            *virt_base;
+        u32             device_base;
+        int             size;
+        int             flags;
+        unsigned long   *bitmap;
+};
+
+static void
+xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+        /*
+         * Ensure multi-page extents are contiguous in machine memory.
+         * This code could be cleaned up some, and the number of
+         * hypercalls reduced.
+         */
+        pgd_t         *pgd;
+        pmd_t         *pmd;
+        pte_t         *pte;
+        unsigned long  pfn, i, flags;
+
+        scrub_pages(vstart, 1 << order);
+
+        balloon_lock(flags);
+
+        /* 1. Zap current PTEs, giving away the underlying pages. */
+        for (i = 0; i < (1<<order); i++) {
+                pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+                pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                pfn = pte->pte_low >> PAGE_SHIFT;
+                HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                        INVALID_P2M_ENTRY;
+                if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+                                          &pfn, 1, 0) != 1) BUG();
+        }
+        /* 2. Get a new contiguous memory extent. */
+        if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+                                  &pfn, 1, order) != 1) BUG();
+        /* 3. Map the new extent in place of old pages. */
+        for (i = 0; i < (1<<order); i++) {
+                pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+                pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE),
+                        __pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+                xen_machphys_update(
+                        pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                        pfn+i;
+        }
+        /* Flush updates through and flush the TLB. */
+        flush_tlb_all();
+
+        balloon_unlock(flags);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                           dma_addr_t *dma_handle)
+#else
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                         dma_addr_t *dma_handle, int gfp)
+#endif
+{
+        void *ret;
+        unsigned int order = get_order(size);
+        unsigned long vstart;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+        int gfp = GFP_ATOMIC;
+
+        if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
+                gfp |= GFP_DMA;
+#else
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+        /* ignore region specifiers */
+        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+        if (mem) {
+                int page = bitmap_find_free_region(mem->bitmap, mem->size,
+                                                   order);
+                if (page >= 0) {
+                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+                        ret = mem->virt_base + (page << PAGE_SHIFT);
+                        memset(ret, 0, size);
+                        return ret;
+                }
+                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+                        return NULL;
+        }
+
+        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+                gfp |= GFP_DMA;
+#endif
+
+        vstart = __get_free_pages(gfp, order);
+        ret = (void *)vstart;
+        if (ret == NULL)
+                return ret;
+
+        xen_contig_memory(vstart, order);
+
+        memset(ret, 0, size);
+        *dma_handle = virt_to_bus(ret);
+
+        return ret;
+}
+
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                         void *vaddr, dma_addr_t dma_handle)
+{
+        free_pages((unsigned long)vaddr, get_order(size));
+}
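The new file above is the substantive part of this merge for the 2.4 tree: under Xen, pages that are contiguous in pseudo-physical space need not be contiguous in machine memory, so the consistent-allocation path must exchange a freshly allocated extent for a genuinely machine-contiguous one (xen_contig_memory) before handing a bus address to hardware. For orientation, a minimal sketch of a 2.4-era caller of this interface follows; the names and ring size are invented for illustration and are not part of this changeset.

/* Hypothetical consumer of the interface added above. */
#include <linux/pci.h>

#define EX_RING_BYTES 4096

static void       *ex_ring_virt;  /* kernel virtual address          */
static dma_addr_t  ex_ring_bus;   /* machine/bus address for device  */

static int ex_ring_init(struct pci_dev *pdev)
{
        ex_ring_virt = pci_alloc_consistent(pdev, EX_RING_BYTES, &ex_ring_bus);
        if (ex_ring_virt == NULL)
                return -ENOMEM;
        /* ex_ring_bus is now machine-contiguous and safe to program
         * into the device's descriptor-base register. */
        return 0;
}

static void ex_ring_exit(struct pci_dev *pdev)
{
        pci_free_consistent(pdev, EX_RING_BYTES, ex_ring_virt, ex_ring_bus);
}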
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Fri Apr 29 07:34:47 2005 +0000
@@ -600,7 +600,7 @@ void __init cpu_init (void)
 	 * Set up the per-thread TLS descriptor cache:
 	 */
 	memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
-	       GDT_ENTRY_TLS_ENTRIES * 8);
+	       GDT_ENTRY_TLS_ENTRIES * 8);

 	cpu_gdt_init(&cpu_gdt_descr[cpu]);

--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S	Fri Apr 29 07:34:47 2005 +0000
@@ -2,7 +2,9 @@
 #include <linux/config.h>

 .section __xen_guest
-	.ascii	"GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xC0000000"
+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
+	.ascii	",XEN_VER=3.0"
+	.ascii	",VIRT_BASE=0xC0000000"
 	.ascii	",LOADER=generic"
 	.byte	0

--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Fri Apr 29 07:34:47 2005 +0000
@@ -44,13 +44,13 @@ xen_contig_memory(unsigned long vstart,

 	/* 1. Zap current PTEs, giving away the underlying pages. */
 	for (i = 0; i < (1<<order); i++) {
-		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		pfn = pte->pte_low >> PAGE_SHIFT;
-		HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+		pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
+		pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
+		pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
+		pfn = pte_val_ma(*pte) >> PAGE_SHIFT;
+		HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
+					     __pte_ma(0), 0);
 		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
 			INVALID_P2M_ENTRY;
 		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
@@ -61,17 +61,10 @@ xen_contig_memory(unsigned long vstart,
 		&pfn, 1, order) != 1) BUG();
 	/* 3. Map the new extent in place of old pages. */
 	for (i = 0; i < (1<<order); i++) {
-		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE),
+		HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
 			__pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
-		xen_machphys_update(
-			pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
-		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-			pfn+i;
+		xen_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn+i;
 	}
 	flush_tlb_all();

@@ -82,11 +75,9 @@ void *dma_alloc_coherent(struct device *
 			 dma_addr_t *dma_handle, int gfp)
 {
 	void *ret;
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	unsigned int order = get_order(size);
 	unsigned long vstart;
-
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

@@ -108,14 +99,13 @@ void *dma_alloc_coherent(struct device *

 	vstart = __get_free_pages(gfp, order);
 	ret = (void *)vstart;
-	if (ret == NULL)
-		return ret;
+
+	if (ret != NULL) {
+		xen_contig_memory(vstart, order);

-	xen_contig_memory(vstart, order);
-
-	memset(ret, 0, size);
-	*dma_handle = virt_to_bus(ret);
-
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
 	return ret;
 }

--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Fri Apr 29 07:34:47 2005 +0000
@@ -329,7 +329,7 @@ int copy_thread(int nr, unsigned long cl
 		desc->b = LDT_entry_b(&info);
 	}

-	p->thread.io_pl = current->thread.io_pl;
+	p->thread.io_pl = current->thread.io_pl;

 	err = 0;
 out:
@@ -445,7 +445,7 @@ struct task_struct fastcall * __switch_t
 	physdev_op_t iopl_op, iobmp_op;
 	multicall_entry_t _mcl[8], *mcl = _mcl;

-	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
+	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */

 	/*
 	 * This is basically '__unlazy_fpu', except that we queue a
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c	Fri Apr 29 07:34:47 2005 +0000
@@ -40,6 +40,7 @@
 #include <linux/efi.h>
 #include <linux/init.h>
 #include <linux/edd.h>
+#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <video/edid.h>
@@ -59,15 +60,11 @@
 /* Allows setting of maximum possible memory size  */
 static unsigned long xen_override_max_pfn;

-extern struct notifier_block *panic_notifier_list;
 static int xen_panic_event(struct notifier_block *, unsigned long, void *);
 static struct notifier_block xen_panic_block = {
-	xen_panic_event,
-	NULL,
-	0 /* try to go last */
+	xen_panic_event, NULL, 0 /* try to go last */
 };

-
 int disable_pse __initdata = 0;

 /*
@@ -901,6 +898,7 @@ efi_find_max_pfn(unsigned long start, un
 	return 0;
 }

+
 /*
  * Find the highest page frame number we have available
  */
@@ -1397,22 +1395,21 @@ static void set_mca_bus(int x) { }
  */
 void __init setup_arch(char **cmdline_p)
 {
-	int i,j;
+	int i, j;
 	physdev_op_t op;
 	unsigned long max_low_pfn;

 	/* Force a quick death if the kernel panics. */
 	extern int panic_timeout;
-	if ( panic_timeout == 0 )
+	if (panic_timeout == 0)
 		panic_timeout = 1;

 	/* Register a call for panic conditions. */
 	notifier_chain_register(&panic_notifier_list, &xen_panic_block);

-	HYPERVISOR_vm_assist(
-		VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-	HYPERVISOR_vm_assist(
-		VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
+			     VMASST_TYPE_writable_pagetables);

 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	early_cpu_init();
@@ -1478,7 +1475,8 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
-	init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
+	init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) +
+		       xen_start_info.nr_pt_frames) << PAGE_SHIFT;

 	/* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
 	/*code_resource.start = virt_to_phys(_text);*/
@@ -1511,7 +1509,7 @@ void __init setup_arch(char **cmdline_p)
 	       max_pfn * sizeof(unsigned long));

 	if (max_pfn > xen_start_info.nr_pages) {
-		/* set to INVALID_P2M_ENTRY */
+		/* set to INVALID_P2M_ENTRY */
 		memset(phys_to_machine_mapping, ~0,
 		       max_pfn * sizeof(unsigned long));
 		memcpy(phys_to_machine_mapping,
@@ -1617,16 +1615,14 @@ void __init setup_arch(char **cmdline_p)
 	}
 }

-
 static int
 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-	HYPERVISOR_crash();
-	/* we're never actually going to get here... */
-	return NOTIFY_DONE;
+	HYPERVISOR_crash();
+	/* we're never actually going to get here... */
+	return NOTIFY_DONE;
 }

-
 #include "setup_arch_post.h"
 /*
  * Local Variables:
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Fri Apr 29 07:34:47 2005 +0000
@@ -820,7 +820,7 @@ static int __init do_boot_cpu(int apicid
 #if 0
 	unsigned short nmi_high = 0, nmi_low = 0;
 #endif
-	full_execution_context_t ctxt;
+	vcpu_guest_context_t ctxt;
 	extern void startup_32_smp(void);
 	extern void hypervisor_callback(void);
 	extern void failsafe_callback(void);
@@ -865,18 +865,18 @@ static int __init do_boot_cpu(int apicid

 	memset(&ctxt, 0, sizeof(ctxt));

-	ctxt.cpu_ctxt.ds = __USER_DS;
-	ctxt.cpu_ctxt.es = __USER_DS;
-	ctxt.cpu_ctxt.fs = 0;
-	ctxt.cpu_ctxt.gs = 0;
-	ctxt.cpu_ctxt.ss = __KERNEL_DS;
-	ctxt.cpu_ctxt.cs = __KERNEL_CS;
-	ctxt.cpu_ctxt.eip = start_eip;
-	ctxt.cpu_ctxt.esp = idle->thread.esp;
-	ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
+	ctxt.user_regs.ds = __USER_DS;
+	ctxt.user_regs.es = __USER_DS;
+	ctxt.user_regs.fs = 0;
+	ctxt.user_regs.gs = 0;
+	ctxt.user_regs.ss = __KERNEL_DS;
+	ctxt.user_regs.cs = __KERNEL_CS;
+	ctxt.user_regs.eip = start_eip;
+	ctxt.user_regs.esp = idle->thread.esp;
+	ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);

 	/* FPU is set up to default initial state. */
-	memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
+	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

 	/* Virtual IDT is empty at start-of-day. */
 	for ( i = 0; i < 256; i++ )
@@ -903,8 +903,8 @@ static int __init do_boot_cpu(int apicid
 	}

 	/* Ring 1 stack is the initial stack. */
-	ctxt.kernel_ss = __KERNEL_DS;
-	ctxt.kernel_esp = idle->thread.esp;
+	ctxt.kernel_ss = __KERNEL_DS;
+	ctxt.kernel_sp = idle->thread.esp;

 	/* Callback handlers. */
 	ctxt.event_callback_cs = __KERNEL_CS;
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c	Fri Apr 29 07:34:47 2005 +0000
@@ -50,8 +50,8 @@

     if ( !test_and_set_bit(0, &printed) )
     {
-        HYPERVISOR_vm_assist(
-            VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
+        HYPERVISOR_vm_assist(VMASST_CMD_disable,
+                             VMASST_TYPE_4gb_segments_notify);

         DP("");
         DP("***************************************************************");
@@ -77,8 +77,7 @@

 static int __init fixup_init(void)
 {
-    HYPERVISOR_vm_assist(
-        VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
+    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
     return 0;
 }
 __initcall(fixup_init);
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Fri Apr 29 07:34:47 2005 +0000
@@ -50,7 +50,7 @@


 EVENT_MASK      = (CS+4)
-ECF_IN_SYSCALL  = (1<<8)
+VGCF_IN_SYSCALL = (1<<8)

 /*
  * Copied from arch/xen/i386/kernel/entry.S
@@ -169,7 +169,7 @@ VGCF_IN_SYSCALL = (1<<8)
  *  struct switch_to_user {
  *      u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
  *  } PACKED;
- * #define ECF_IN_SYSCALL (1<<8)
+ * #define VGCF_IN_SYSCALL (1<<8)
  */
 .macro SWITCH_TO_USER flag
 	movl $0,%gs:pda_kernel_mode	# change to user mode
@@ -275,7 +275,7 @@ sysret_check:
 	jnz sysret_careful
 	XEN_UNBLOCK_EVENTS(%rsi)
 	RESTORE_ARGS 0,8,0
-	SWITCH_TO_USER ECF_IN_SYSCALL
+	SWITCH_TO_USER VGCF_IN_SYSCALL

 	/* Handle reschedules */
 	/* edx: work, edi: workmask */
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Fri Apr 29 07:34:47 2005 +0000
@@ -519,7 +519,7 @@ HYPERVISOR_vm_assist(

 static inline int
 HYPERVISOR_boot_vcpu(
-    unsigned long vcpu, full_execution_context_t *ctxt)
+    unsigned long vcpu, vcpu_guest_context_t *ctxt)
 {
     int ret;
     unsigned long ign1, ign2;
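Only the context type in the wrapper's signature changes here. For orientation, do_boot_cpu() in the smpboot.c hunk above fills a vcpu_guest_context_t and hands it to this hypercall to start a secondary VCPU. A compressed sketch of that caller-side flow, with error handling and most register setup elided (not a drop-in):

/* Sketch of the boot-VCPU flow, mirroring do_boot_cpu() above. */
vcpu_guest_context_t ctxt;

memset(&ctxt, 0, sizeof(ctxt));
ctxt.user_regs.cs  = __KERNEL_CS;
ctxt.user_regs.eip = start_eip;          /* secondary CPU entry point */
ctxt.kernel_ss     = __KERNEL_DS;
ctxt.kernel_sp     = idle->thread.esp;   /* ring-1 stack */
if (HYPERVISOR_boot_vcpu(cpu, &ctxt) != 0)
        BUG();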
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h	Fri Apr 29 07:34:47 2005 +0000
@@ -111,7 +111,7 @@ typedef struct { unsigned long pgprot; }
 static inline unsigned long pgd_val(pgd_t x)
 {
 	unsigned long ret = x.pgd;
-	if (ret) ret = machine_to_phys(ret) | 1;
+	if (ret) ret = machine_to_phys(ret);
 	return ret;
 }
 #define pgprot_val(x)	((x).pgprot)
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Thu Apr 28 13:54:01 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Fri Apr 29 07:34:47 2005 +0000
@@ -489,7 +489,7 @@ HYPERVISOR_switch_to_user(void)

 static inline int
 HYPERVISOR_boot_vcpu(
-    unsigned long vcpu, full_execution_context_t *ctxt)
+    unsigned long vcpu, vcpu_guest_context_t *ctxt)
 {
     int ret;

--- a/tools/libxc/xc.h	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc.h	Fri Apr 29 07:34:47 2005 +0000
@@ -160,7 +160,7 @@ int xc_domain_getfullinfo(int xc_handle,
                           u32 domid,
                           u32 vcpu,
                           xc_domaininfo_t *info,
-                          full_execution_context_t *ctxt);
+                          vcpu_guest_context_t *ctxt);
 int xc_domain_setcpuweight(int xc_handle,
                            u32 domid,
                            float weight);
--- a/tools/libxc/xc_domain.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_domain.c	Fri Apr 29 07:34:47 2005 +0000
@@ -144,7 +144,7 @@ int xc_domain_getfullinfo(int xc_handle,
                           u32 domid,
                           u32 vcpu,
                           xc_domaininfo_t *info,
-                          full_execution_context_t *ctxt)
+                          vcpu_guest_context_t *ctxt)
 {
     int rc, errno_saved;
     dom0_op_t op;
--- a/tools/libxc/xc_linux_build.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_linux_build.c	Fri Apr 29 07:34:47 2005 +0000
@@ -45,7 +45,7 @@ static int setup_guest(int xc_handle,
                        gzFile initrd_gfd, unsigned long initrd_len,
                        unsigned long nr_pages,
                        unsigned long *pvsi, unsigned long *pvke,
-                       full_execution_context_t *ctxt,
+                       vcpu_guest_context_t *ctxt,
                        const char *cmdline,
                        unsigned long shared_info_frame,
                        unsigned int control_evtchn,
@@ -316,7 +316,7 @@ int xc_linux_build(int xc_handle,
     int initrd_fd = -1;
     gzFile initrd_gfd = NULL;
     int rc, i;
-    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
     unsigned long nr_pages;
     char *image = NULL;
     unsigned long image_size, initrd_size=0;
@@ -400,19 +400,19 @@ int xc_linux_build(int xc_handle,
      *       [EAX,EBX,ECX,EDX,EDI,EBP are zero]
      *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
      */
-    ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
-    ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
-    ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
-    ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
-    ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
-    ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
-    ctxt->cpu_ctxt.eip = vkern_entry;
-    ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE;
-    ctxt->cpu_ctxt.esi = vstartinfo_start;
-    ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2);
+    ctxt->user_regs.ds = FLAT_KERNEL_DS;
+    ctxt->user_regs.es = FLAT_KERNEL_DS;
+    ctxt->user_regs.fs = FLAT_KERNEL_DS;
+    ctxt->user_regs.gs = FLAT_KERNEL_DS;
+    ctxt->user_regs.ss = FLAT_KERNEL_DS;
+    ctxt->user_regs.cs = FLAT_KERNEL_CS;
+    ctxt->user_regs.eip = vkern_entry;
+    ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
+    ctxt->user_regs.esi = vstartinfo_start;
+    ctxt->user_regs.eflags = (1<<9) | (1<<2);

     /* FPU is set up to default initial state. */
-    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

     /* Virtual IDT is empty at start-of-day. */
     for ( i = 0; i < 256; i++ )
@@ -432,8 +432,8 @@ int xc_linux_build(int xc_handle,
     ctxt->gdt_ents = 0;

     /* Ring 1 stack is the initial stack. */
-    ctxt->kernel_ss = FLAT_KERNEL_DS;
-    ctxt->kernel_esp = vstartinfo_start + 2*PAGE_SIZE;
+    ctxt->kernel_ss = FLAT_KERNEL_DS;
+    ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;

     /* No debugging. */
     memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
--- a/tools/libxc/xc_linux_restore.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_linux_restore.c	Fri Apr 29 07:34:47 2005 +0000
@@ -73,7 +73,7 @@ int xc_linux_restore(int xc_handle, XcIO
     shared_info_t *shared_info = (shared_info_t *)shared_info_page;

     /* A copy of the CPU context of the guest. */
-    full_execution_context_t ctxt;
+    vcpu_guest_context_t ctxt;

     /* First 16 bytes of the state file must contain 'LinuxGuestRecord'. */
     char signature[16];
@@ -505,13 +505,13 @@ int xc_linux_restore(int xc_handle, XcIO
     }

     /* Uncanonicalise the suspend-record frame number and poke resume rec. */
-    pfn = ctxt.cpu_ctxt.esi;
+    pfn = ctxt.user_regs.esi;
     if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
     {
         xcio_error(ioctxt, "Suspend record frame number is bad");
         goto out;
     }
-    ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
+    ctxt.user_regs.esi = mfn = pfn_to_mfn_table[pfn];
     p_srec = xc_map_foreign_range(
         xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
     p_srec->resume_info.nr_pages = nr_pfns;
@@ -599,7 +599,7 @@ int xc_linux_restore(int xc_handle, XcIO

     /*
      * Safety checking of saved context:
-     *  1. cpu_ctxt is fine, as Xen checks that on context switch.
+     *  1. user_regs is fine, as Xen checks that on context switch.
      *  2. fpu_ctxt is fine, as it can't hurt Xen.
      *  3. trap_ctxt needs the code selectors checked.
      *  4. fast_trap_idx is checked by Xen.
--- a/tools/libxc/xc_linux_save.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_linux_save.c	Fri Apr 29 07:34:47 2005 +0000
@@ -325,7 +325,7 @@ static int analysis_phase( int xc_handle

 int suspend_and_state(int xc_handle, XcIOContext *ioctxt,
                       xc_domaininfo_t *info,
-                      full_execution_context_t *ctxt)
+                      vcpu_guest_context_t *ctxt)
 {
     int i=0;

@@ -391,7 +391,7 @@ int xc_linux_save(int xc_handle, XcIOCon
     unsigned long shared_info_frame;

     /* A copy of the CPU context of the guest. */
-    full_execution_context_t ctxt;
+    vcpu_guest_context_t ctxt;

     /* A table containg the type of each PFN (/not/ MFN!). */
     unsigned long *pfn_type = NULL;
@@ -922,7 +922,7 @@ int xc_linux_save(int xc_handle, XcIOCon
                    "SUSPEND flags %08u shinfo %08lx eip %08u "
                    "esi %08u\n",info.flags,
                    info.shared_info_frame,
-                   ctxt.cpu_ctxt.eip, ctxt.cpu_ctxt.esi );
+                   ctxt.user_regs.eip, ctxt.user_regs.esi );
     }

     if ( xc_shadow_control( xc_handle, domid,
@@ -995,7 +995,7 @@ int xc_linux_save(int xc_handle, XcIOCon
        domid for this to succeed. */
     p_srec = xc_map_foreign_range(xc_handle, domid,
                                   sizeof(*p_srec), PROT_READ,
-                                  ctxt.cpu_ctxt.esi);
+                                  ctxt.user_regs.esi);
     if (!p_srec){
         xcio_error(ioctxt, "Couldn't map suspend record");
         goto out;
@@ -1009,7 +1009,7 @@ int xc_linux_save(int xc_handle, XcIOCon
     }

     /* Canonicalise the suspend-record frame number. */
-    if ( !translate_mfn_to_pfn(&ctxt.cpu_ctxt.esi) ){
+    if ( !translate_mfn_to_pfn(&ctxt.user_regs.esi) ){
         xcio_error(ioctxt, "Suspend record is not in range of pseudophys map");
         goto out;
     }
--- a/tools/libxc/xc_plan9_build.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_plan9_build.c	Fri Apr 29 07:34:47 2005 +0000
@@ -113,7 +113,7 @@ setup_guest(int xc_handle,
 	    unsigned long tot_pages,
 	    unsigned long *virt_startinfo_addr,
 	    unsigned long *virt_load_addr,
-	    full_execution_context_t * ctxt,
+	    vcpu_guest_context_t * ctxt,
 	    const char *cmdline,
 	    unsigned long shared_info_frame,
 	    unsigned int control_evtchn,
@@ -411,7 +411,7 @@ xc_plan9_build(int xc_handle,
 	int kernel_fd = -1;
 	gzFile kernel_gfd = NULL;
 	int rc, i;
-	full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+	vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
 	unsigned long virt_startinfo_addr;

 	if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) {
@@ -482,23 +482,23 @@ xc_plan9_build(int xc_handle,
 	 *       [EAX,EBX,ECX,EDX,EDI,EBP are zero]
 	 *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
 	 */
-	ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
-	ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
-	ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
-	ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
-	ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
-	ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
-	ctxt->cpu_ctxt.eip = load_addr;
-	ctxt->cpu_ctxt.eip = 0x80100020;
+	ctxt->user_regs.ds = FLAT_KERNEL_DS;
+	ctxt->user_regs.es = FLAT_KERNEL_DS;
+	ctxt->user_regs.fs = FLAT_KERNEL_DS;
+	ctxt->user_regs.gs = FLAT_KERNEL_DS;
+	ctxt->user_regs.ss = FLAT_KERNEL_DS;
+	ctxt->user_regs.cs = FLAT_KERNEL_CS;
+	ctxt->user_regs.eip = load_addr;
+	ctxt->user_regs.eip = 0x80100020;
 	/* put stack at top of second page */
-	ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
+	ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);

 	/* why is this set? */
-	ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp;
-	ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2);
+	ctxt->user_regs.esi = ctxt->user_regs.esp;
+	ctxt->user_regs.eflags = (1 << 9) | (1 << 2);

 	/* FPU is set up to default initial state. */
-	memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
+	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

 	/* Virtual IDT is empty at start-of-day. */
 	for (i = 0; i < 256; i++) {
@@ -519,7 +519,7 @@ xc_plan9_build(int xc_handle,
 	/* Ring 1 stack is the initial stack. */
 	/* put stack at top of second page */
 	ctxt->kernel_ss = FLAT_KERNEL_DS;
-	ctxt->kernel_esp = ctxt->cpu_ctxt.esp;
+	ctxt->kernel_sp = ctxt->user_regs.esp;

 	/* No debugging. */
 	memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
--- a/tools/libxc/xc_ptrace.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_ptrace.c	Fri Apr 29 07:34:47 2005 +0000
@@ -132,7 +132,7 @@ static long nr_pages = 0;
 unsigned long *page_array = NULL;
 static int regs_valid[MAX_VIRT_CPUS];
 static unsigned long cr3[MAX_VIRT_CPUS];
-static full_execution_context_t ctxt[MAX_VIRT_CPUS];
+static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];

 /* --------------------- */

@@ -220,7 +220,7 @@ waitdomain(int domain, int *status, int
 {
     dom0_op_t op;
     int retval;
-    full_execution_context_t ctxt;
+    vcpu_guest_context_t ctxt;
     struct timespec ts;
     ts.tv_sec = 0;
     ts.tv_nsec = 10*1000*1000;
@@ -300,7 +300,7 @@ xc_ptrace(enum __ptrace_request request,
         FETCH_REGS(cpu);

         if (request == PTRACE_GETREGS) {
-                SET_PT_REGS(pt, ctxt[cpu].cpu_ctxt);
+                SET_PT_REGS(pt, ctxt[cpu].user_regs);
                 memcpy(data, &pt, sizeof(elf_gregset_t));
         } else if (request == PTRACE_GETFPREGS)
             memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
@@ -309,7 +309,7 @@ xc_ptrace(enum __ptrace_request request,
         break;
     case PTRACE_SETREGS:
         op.cmd = DOM0_SETDOMAININFO;
-        SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].cpu_ctxt);
+        SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].user_regs);
         op.u.setdomaininfo.domain = domid;
         /* XXX need to understand multiple exec_domains */
         op.u.setdomaininfo.exec_domain = cpu;
@@ -339,7 +339,7 @@ xc_ptrace(enum __ptrace_request request,
         retval = do_dom0_op(xc_handle, &op);
         break;
     case PTRACE_SINGLESTEP:
-        ctxt[VCPU].cpu_ctxt.eflags |= PSL_T;
+        ctxt[VCPU].user_regs.eflags |= PSL_T;
         op.cmd = DOM0_SETDOMAININFO;
         op.u.setdomaininfo.domain = domid;
         op.u.setdomaininfo.exec_domain = 0;
@@ -355,8 +355,8 @@ xc_ptrace(enum __ptrace_request request,
         if (request != PTRACE_SINGLESTEP) {
             FETCH_REGS(cpu);
             /* Clear trace flag */
-            if (ctxt[cpu].cpu_ctxt.eflags & PSL_T) {
-                ctxt[cpu].cpu_ctxt.eflags &= ~PSL_T;
+            if (ctxt[cpu].user_regs.eflags & PSL_T) {
+                ctxt[cpu].user_regs.eflags &= ~PSL_T;
                 op.cmd = DOM0_SETDOMAININFO;
                 op.u.setdomaininfo.domain = domid;
                 op.u.setdomaininfo.exec_domain = cpu;
--- a/tools/libxc/xc_vmx_build.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/libxc/xc_vmx_build.c	Fri Apr 29 07:34:47 2005 +0000
@@ -149,7 +149,7 @@ static int setup_guest(int xc_handle,
                        char *image, unsigned long image_size,
                        gzFile initrd_gfd, unsigned long initrd_len,
                        unsigned long nr_pages,
-                       full_execution_context_t *ctxt,
+                       vcpu_guest_context_t *ctxt,
                        const char *cmdline,
                        unsigned long shared_info_frame,
                        unsigned int control_evtchn,
@@ -422,22 +422,22 @@ static int setup_guest(int xc_handle,
     /*
      * Initial register values:
      */
-    ctxt->cpu_ctxt.ds = 0x68;
-    ctxt->cpu_ctxt.es = 0x0;
-    ctxt->cpu_ctxt.fs = 0x0;
-    ctxt->cpu_ctxt.gs = 0x0;
-    ctxt->cpu_ctxt.ss = 0x68;
-    ctxt->cpu_ctxt.cs = 0x60;
-    ctxt->cpu_ctxt.eip = dsi.v_kernentry;
-    ctxt->cpu_ctxt.edx = vboot_gdt_start;
-    ctxt->cpu_ctxt.eax = 0x800;
-    ctxt->cpu_ctxt.esp = vboot_gdt_end;
-    ctxt->cpu_ctxt.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */
-    ctxt->cpu_ctxt.ecx = mem_mapp->nr_map;
-    ctxt->cpu_ctxt.esi = vboot_params_start;
-    ctxt->cpu_ctxt.edi = vboot_params_start + 0x2d0;
+    ctxt->user_regs.ds = 0x68;
+    ctxt->user_regs.es = 0x0;
+    ctxt->user_regs.fs = 0x0;
+    ctxt->user_regs.gs = 0x0;
+    ctxt->user_regs.ss = 0x68;
+    ctxt->user_regs.cs = 0x60;
+    ctxt->user_regs.eip = dsi.v_kernentry;
+    ctxt->user_regs.edx = vboot_gdt_start;
+    ctxt->user_regs.eax = 0x800;
+    ctxt->user_regs.esp = vboot_gdt_end;
+    ctxt->user_regs.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */
+    ctxt->user_regs.ecx = mem_mapp->nr_map;
+    ctxt->user_regs.esi = vboot_params_start;
+    ctxt->user_regs.edi = vboot_params_start + 0x2d0;

-    ctxt->cpu_ctxt.eflags = (1<<2);
+    ctxt->user_regs.eflags = (1<<2);

     return 0;

@@ -488,7 +488,7 @@ int xc_vmx_build(int xc_handle,
     int initrd_fd = -1;
     gzFile initrd_gfd = NULL;
     int rc, i;
-    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
     unsigned long nr_pages;
     char *image = NULL;
     unsigned long image_size, initrd_size=0;
@@ -565,9 +565,9 @@ int xc_vmx_build(int xc_handle,
     if ( image != NULL )
         free(image);

-    ctxt->flags = ECF_VMX_GUEST;
+    ctxt->flags = VGCF_VMX_GUEST;
     /* FPU is set up to default initial state. */
-    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

     /* Virtual IDT is empty at start-of-day. */
     for ( i = 0; i < 256; i++ )
@@ -588,8 +588,8 @@ int xc_vmx_build(int xc_handle,

     /* Ring 1 stack is the initial stack. */
 /*
-    ctxt->kernel_ss = FLAT_KERNEL_DS;
-    ctxt->kernel_esp = vstartinfo_start;
+    ctxt->kernel_ss = FLAT_KERNEL_DS;
+    ctxt->kernel_sp = vstartinfo_start;
 */
     /* No debugging. */
     memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
--- a/tools/xentrace/xenctx.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/tools/xentrace/xenctx.c	Fri Apr 29 07:34:47 2005 +0000
@@ -24,27 +24,26 @@
 #include "xc.h"

 #ifdef __i386__
-void
-print_ctx(full_execution_context_t *ctx1)
+void print_ctx(vcpu_guest_context_t *ctx1)
 {
-    execution_context_t *ctx = &ctx1->cpu_ctxt;
+    struct cpu_user_regs *regs = &ctx1->user_regs;

-    printf("eip: %08lx\t", ctx->eip);
-    printf("esp: %08lx\n", ctx->esp);
+    printf("eip: %08lx\t", regs->eip);
+    printf("esp: %08lx\n", regs->esp);

-    printf("eax: %08lx\t", ctx->eax);
-    printf("ebx: %08lx\t", ctx->ebx);
-    printf("ecx: %08lx\t", ctx->ecx);
-    printf("edx: %08lx\n", ctx->edx);
+    printf("eax: %08lx\t", regs->eax);
+    printf("ebx: %08lx\t", regs->ebx);
+    printf("ecx: %08lx\t", regs->ecx);
+    printf("edx: %08lx\n", regs->edx);

-    printf("esi: %08lx\t", ctx->esi);
-    printf("edi: %08lx\t", ctx->edi);
-    printf("ebp: %08lx\n", ctx->ebp);
+    printf("esi: %08lx\t", regs->esi);
+    printf("edi: %08lx\t", regs->edi);
+    printf("ebp: %08lx\n", regs->ebp);

-    printf(" cs: %08lx\t", ctx->cs);
-    printf(" ds: %08lx\t", ctx->ds);
-    printf(" fs: %08lx\t", ctx->fs);
-    printf(" gs: %08lx\n", ctx->gs);
+    printf(" cs: %08lx\t", regs->cs);
+    printf(" ds: %08lx\t", regs->ds);
+    printf(" fs: %08lx\t", regs->fs);
+    printf(" gs: %08lx\n", regs->gs);

 }
 #endif
@@ -53,7 +52,7 @@ void dump_ctx(u32 domid, u32 vcpu)
 {
     int ret;
     xc_domaininfo_t info;
-    full_execution_context_t ctx;
+    vcpu_guest_context_t ctx;

     int xc_handle = xc_interface_open(); /* for accessing control interface */

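The xenctx tool is a straightforward consumer of the renamed libxc call shown earlier. A minimal sketch of the fetch-then-print flow that dump_ctx() presumably wraps (error handling trimmed; the helper name is illustrative, see the real file for details):

/* Sketch: fetch a VCPU's registers via libxc, then print them. */
void dump_ctx_sketch(int xc_handle, u32 domid, u32 vcpu)
{
    xc_domaininfo_t info;
    vcpu_guest_context_t ctx;

    if (xc_domain_getfullinfo(xc_handle, domid, vcpu, &info, &ctx) < 0)
        return;
    print_ctx(&ctx);    /* dumps ctx.user_regs as shown above */
}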
--- a/xen/arch/ia64/dom0_ops.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/ia64/dom0_ops.c	Fri Apr 29 07:34:47 2005 +0000
@@ -47,7 +47,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     return ret;
 }

-void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
+void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
 {
     int i;

--- a/xen/arch/ia64/domain.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/ia64/domain.c	Fri Apr 29 07:34:47 2005 +0000
@@ -199,13 +199,13 @@ void arch_do_boot_vcpu(struct exec_domai
 	return;
 }

-int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c)
+int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
 {
 	dummy();
 	return 1;
 }

-int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
+int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
 {
 	dummy();
 	return 1;
--- a/xen/arch/ia64/xenmisc.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/ia64/xenmisc.c	Fri Apr 29 07:34:47 2005 +0000
@@ -66,7 +66,7 @@ void grant_table_destroy(struct domain *
 	return;
 }

-struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
+struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); }

 void raise_actimer_softirq(void)
 {
@@ -278,6 +278,11 @@ if (!i--) { printk("+",id); cnt[id] = 10
 	if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
 }

+void continue_running(struct exec_domain *same)
+{
+	/* nothing to do */
+}
+
 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
 {
 	va_list args;
--- a/xen/arch/x86/apic.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/apic.c	Fri Apr 29 07:34:47 2005 +0000
@@ -825,7 +825,7 @@ int reprogram_ac_timer(s_time_t timeout)
     return 1;
 }

-void smp_apic_timer_interrupt(struct xen_regs * regs)
+void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
 {
     ack_APIC_irq();
     perfc_incrc(apic_timer);
@@ -835,7 +835,7 @@ void smp_apic_timer_interrupt(struct xen
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-asmlinkage void smp_spurious_interrupt(struct xen_regs *regs)
+asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs)
 {
     unsigned long v;

@@ -857,7 +857,7 @@ asmlinkage void smp_spurious_interrupt(s
  * This interrupt should never happen with our APIC/SMP architecture
  */

-asmlinkage void smp_error_interrupt(struct xen_regs *regs)
+asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs)
 {
     unsigned long v, v1;

--- a/xen/arch/x86/boot/x86_32.S	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/boot/x86_32.S	Fri Apr 29 07:34:47 2005 +0000
@@ -24,10 +24,10 @@ bad_cpu_msg:
 not_multiboot_msg:
         .asciz "ERR: Not a Multiboot bootloader!"
 bad_cpu:
-        mov     $SYMBOL_NAME(bad_cpu_msg)-__PAGE_OFFSET,%esi
+        mov     $bad_cpu_msg-__PAGE_OFFSET,%esi
         jmp     print_err
 not_multiboot:
-        mov     $SYMBOL_NAME(not_multiboot_msg)-__PAGE_OFFSET,%esi
+        mov     $not_multiboot_msg-__PAGE_OFFSET,%esi
 print_err:
         mov     $0xB8000,%edi  # VGA framebuffer
 1:      mov     (%esi),%bl
@@ -118,7 +118,7 @@ 1:      stosl   /* low mappings cover as
         mov     $(__HYPERVISOR_CS << 16),%eax
         mov     %dx,%ax        /* selector = 0x0010 = cs */
         mov     $0x8E00,%dx    /* interrupt gate - dpl=0, present */
-        lea     SYMBOL_NAME(idt_table)-__PAGE_OFFSET,%edi
+        lea     idt_table-__PAGE_OFFSET,%edi
         mov     $256,%ecx
 1:      mov     %eax,(%edi)
         mov     %edx,4(%edi)
@@ -163,38 +163,38 @@ ignore_int:
         mov     %eax,%ds
         mov     %eax,%es
         pushl   $int_msg
-        call    SYMBOL_NAME(printf)
+        call    printf
 1:      jmp     1b

 /*** STACK LOCATION ***/

 ENTRY(stack_start)
-        .long   SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200 - __PAGE_OFFSET
+        .long   cpu0_stack + STACK_SIZE - 200 - __PAGE_OFFSET
         .long   __HYPERVISOR_DS

 /*** DESCRIPTOR TABLES ***/

-.globl SYMBOL_NAME(idt)
-.globl SYMBOL_NAME(gdt)
+.globl idt
+.globl gdt

         ALIGN

         .word   0
 idt_descr:
         .word   256*8-1
-SYMBOL_NAME(idt):
-        .long   SYMBOL_NAME(idt_table)
+idt:
+        .long   idt_table

         .word   0
 gdt_descr:
         .word   (LAST_RESERVED_GDT_ENTRY*8)+7
-SYMBOL_NAME(gdt):
-        .long   SYMBOL_NAME(gdt_table)  /* gdt base */
+gdt:
+        .long   gdt_table               /* gdt base */

         .word   0
 nopaging_gdt_descr:
         .word   (LAST_RESERVED_GDT_ENTRY*8)+7
-        .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
+        .long   gdt_table-__PAGE_OFFSET

         ALIGN
 /* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
--- a/xen/arch/x86/boot/x86_64.S	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/boot/x86_64.S	Fri Apr 29 07:34:47 2005 +0000
@@ -7,10 +7,10 @@

         .text
         .code32
-        
+
 ENTRY(start)
         jmp __start
-        
+
         .org    0x004
 /*** MULTIBOOT HEADER ****/
         /* Magic number indicating a Multiboot header. */
@@ -180,8 +180,8 @@ 1:      movq    %rax,(%rdi)

 /*** DESCRIPTOR TABLES ***/

-.globl SYMBOL_NAME(idt)
-.globl SYMBOL_NAME(gdt)
+.globl idt
+.globl gdt

         .org    0x1f0
         .word   (LAST_RESERVED_GDT_ENTRY*8)+7
@@ -203,17 +203,17 @@ ENTRY(gdt_table)
         .word   0
 gdt_descr:
         .word   (LAST_RESERVED_GDT_ENTRY*8)+7
-SYMBOL_NAME(gdt):
-        .quad   SYMBOL_NAME(gdt_table)
+gdt:
+        .quad   gdt_table

         .word   0
 idt_descr:
         .word   256*16-1
-SYMBOL_NAME(idt):
-        .quad   SYMBOL_NAME(idt_table)
+idt:
+        .quad   idt_table

 ENTRY(stack_start)
-        .quad   SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200
+        .quad   cpu0_stack + STACK_SIZE - 200

 high_start:
         .quad   __high_start
@@ -258,6 +258,6 @@ int_msg:
 ignore_int:
         cld
         leaq    int_msg(%rip),%rdi
-        call    SYMBOL_NAME(printf)
+        call    printf
 1:      jmp     1b

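For context on the SYMBOL_NAME() removals in these two boot files: the macro came from the Linux <linux/linkage.h> header, where it existed to decorate C symbol names on old a.out toolchains; on ELF it expands to the bare name. Roughly (paraphrased from memory, not the exact header text):

/* <linux/linkage.h>, approximately: */
#ifdef __ELF__
#define SYMBOL_NAME(X) X        /* ELF: no symbol decoration */
#else
#define SYMBOL_NAME(X) _##X     /* a.out era: leading underscore */
#endif

Since Xen only targets ELF, dropping the wrapper is a pure cleanup that leaves the generated symbols unchanged.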
--- a/xen/arch/x86/cdb.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/cdb.c	Fri Apr 29 07:34:47 2005 +0000
@@ -214,7 +214,7 @@ xendbg_send_reply(const char *buf, struc
 }

 static int
-handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx)
+handle_register_read_command(struct cpu_user_regs *regs, struct xendbg_context *ctx)
 {
 	char buf[121];

@@ -240,7 +240,7 @@ handle_register_read_command(struct xen_
 }

 static int
-process_command(char *received_packet, struct xen_regs *regs,
+process_command(char *received_packet, struct cpu_user_regs *regs,
 		struct xendbg_context *ctx)
 {
 	char *ptr;
@@ -318,7 +318,7 @@ xdb_ctx = {
 };

 int
-__trap_to_cdb(struct xen_regs *regs)
+__trap_to_cdb(struct cpu_user_regs *regs)
 {
 	int resume = 0;
 	int r;
--- a/xen/arch/x86/dom0_ops.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/dom0_ops.c	Fri Apr 29 07:34:47 2005 +0000
@@ -374,54 +374,44 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
 }

 void arch_getdomaininfo_ctxt(
-    struct exec_domain *ed, full_execution_context_t *c)
+    struct exec_domain *ed, struct vcpu_guest_context *c)
 {
     int i;
 #ifdef __i386__  /* Remove when x86_64 VMX is implemented */
 #ifdef CONFIG_VMX
-    extern void save_vmx_execution_context(execution_context_t *);
+    extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
 #endif
 #endif

-    c->flags = 0;
-    memcpy(&c->cpu_ctxt,
-           &ed->arch.user_ctxt,
-           sizeof(ed->arch.user_ctxt));
+    memcpy(c, &ed->arch.guest_context, sizeof(*c));
+
     /* IOPL privileges are virtualised -- merge back into returned eflags. */
-    BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0);
-    c->cpu_ctxt.eflags |= ed->arch.iopl << 12;
+    BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
+    c->user_regs.eflags |= ed->arch.iopl << 12;

 #ifdef __i386__
 #ifdef CONFIG_VMX
     if ( VMX_DOMAIN(ed) )
-        save_vmx_execution_context(&c->cpu_ctxt);
+        save_vmx_cpu_user_regs(&c->user_regs);
 #endif
 #endif

+    c->flags = 0;
     if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
-        c->flags |= ECF_I387_VALID;
-    if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
-        c->flags |= ECF_IN_KERNEL;
+        c->flags |= VGCF_I387_VALID;
+    if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
+        c->flags |= VGCF_IN_KERNEL;
 #ifdef CONFIG_VMX
     if (VMX_DOMAIN(ed))
-        c->flags |= ECF_VMX_GUEST;
+        c->flags |= VGCF_VMX_GUEST;
 #endif
-    memcpy(&c->fpu_ctxt,
-           &ed->arch.i387,
-           sizeof(ed->arch.i387));
-    memcpy(&c->trap_ctxt,
-           ed->arch.traps,
-           sizeof(ed->arch.traps));
+
 #ifdef ARCH_HAS_FAST_TRAP
     if ( (ed->arch.fast_trap_desc.a == 0) &&
          (ed->arch.fast_trap_desc.b == 0) )
         c->fast_trap_idx = 0;
-    else
-        c->fast_trap_idx =
-            ed->arch.fast_trap_idx;
 #endif
-    c->ldt_base = ed->arch.ldt_base;
-    c->ldt_ents = ed->arch.ldt_ents;
+
     c->gdt_ents = 0;
     if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
     {
@@ -430,22 +420,8 @@ void arch_getdomaininfo_ctxt(
                 l1e_get_pfn(ed->arch.perdomain_ptes[i]);
         c->gdt_ents = GET_GDT_ENTRIES(ed);
     }
-    c->kernel_ss = ed->arch.kernel_ss;
-    c->kernel_esp = ed->arch.kernel_sp;
-    c->pt_base =
-        pagetable_val(ed->arch.guest_table);
-    memcpy(c->debugreg,
-           ed->arch.debugreg,
-           sizeof(ed->arch.debugreg));
-#if defined(__i386__)
-    c->event_callback_cs     = ed->arch.event_selector;
-    c->event_callback_eip    = ed->arch.event_address;
-    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
-    c->failsafe_callback_eip = ed->arch.failsafe_address;
-#elif defined(__x86_64__)
-    c->event_callback_eip    = ed->arch.event_address;
-    c->failsafe_callback_eip = ed->arch.failsafe_address;
-    c->syscall_callback_eip  = ed->arch.syscall_address;
-#endif
+
+    c->pt_base = pagetable_val(ed->arch.guest_table);
+
     c->vm_assist = ed->domain->vm_assist;
 }
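The bulk of the deleted field-by-field copying works only because ed->arch now embeds a complete struct vcpu_guest_context (see the domain.c diff below), laid out identically to the structure handed back to the tools. The save path therefore reduces to one wholesale copy plus fixups for the few fields Xen virtualises; schematically:

/* Schematic of the new save path; not a drop-in replacement. */
memcpy(c, &ed->arch.guest_context, sizeof(*c)); /* bulk copy              */
c->user_regs.eflags |= ed->arch.iopl << 12;     /* re-merge virtual IOPL  */
c->flags = 0;                                   /* then recompute VGCF_*  */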
--- a/xen/arch/x86/domain.c	Thu Apr 28 13:54:01 2005 +0000
+++ b/xen/arch/x86/domain.c	Fri Apr 29 07:34:47 2005 +0000
@@ -50,6 +50,16 @@ struct percpu_ctxt {
 } __cacheline_aligned;
 static struct percpu_ctxt percpu_ctxt[NR_CPUS];

+static void continue_idle_task(struct exec_domain *ed)
+{
+    reset_stack_and_jump(idle_loop);
+}
+
+static void continue_nonidle_task(struct exec_domain *ed)
+{
+    reset_stack_and_jump(ret_from_intr);
+}
+
 static void default_idle(void)
 {
     local_irq_disable();
@@ -74,24 +84,32 @@ static __attribute_used__ void idle_loop
     }
 }

+static void __startup_cpu_idle_loop(struct exec_domain *ed)
+{
+    /* Signal to boot CPU that we are done. */
+    init_idle();
+
+    /* Start normal idle loop. */
+    ed->arch.schedule_tail = continue_idle_task;
+    reset_stack_and_jump(idle_loop);
+}
+
 void startup_cpu_idle_loop(void)
 {
+    struct exec_domain *ed = current;
+
     /* Just some sanity to ensure that the scheduler is set up okay. */
-    ASSERT(current->domain->id == IDLE_DOMAIN_ID);
-    percpu_ctxt[smp_processor_id()].curr_ed = current;
-    set_bit(smp_processor_id(), &current->domain->cpuset);
-    domain_unpause_by_systemcontroller(current->domain);
+    ASSERT(ed->domain->id == IDLE_DOMAIN_ID);
+    percpu_ctxt[smp_processor_id()].curr_ed = ed;
+    set_bit(smp_processor_id(), &ed->domain->cpuset);
+    domain_unpause_by_systemcontroller(ed->domain);
+
+    ed->arch.schedule_tail = __startup_cpu_idle_loop;
     raise_softirq(SCHEDULE_SOFTIRQ);
     do_softirq();

-    /*
-     * Declares CPU setup done to the boot processor.
-     * Therefore memory barrier to ensure state is visible.
-     */
-    smp_mb();
-    init_idle();
-
-    idle_loop();
+    /* End up in __startup_cpu_idle_loop, not here. */
+    BUG();
 }

 static long no_idt[2];
@@ -219,16 +237,6 @@ void free_perdomain_pt(struct domain *d)
 #endif
 }

-static void continue_idle_task(struct exec_domain *ed)
-{
-    reset_stack_and_jump(idle_loop);
-}
-
-static void continue_nonidle_task(struct exec_domain *ed)
-{
-    reset_stack_and_jump(ret_from_intr);
-}
-
 void arch_do_createdomain(struct exec_domain *ed)
 {
     struct domain *d = ed->domain;
@@ -237,11 +245,7 @@ void arch_do_createdomain(struct exec_do

     ed->arch.flags = TF_kernel_mode;

-    if ( d->id == IDLE_DOMAIN_ID )
-    {
-        ed->arch.schedule_tail = continue_idle_task;
-    }
-    else
+    if ( d->id != IDLE_DOMAIN_ID )
     {
         ed->arch.schedule_tail = continue_nonidle_task;

@@ -312,14 +316,14 @@ void arch_vmx_do_launch(struct exec_doma
     reset_stack_and_jump(vmx_asm_do_launch);
 }

-static int vmx_final_setup_guest(struct exec_domain *ed,
-                                 full_execution_context_t *full_context)
+static int vmx_final_setup_guest(
+    struct exec_domain *ed, struct vcpu_guest_context *ctxt)
 {
     int error;
-    execution_context_t *context;
+    struct cpu_user_regs *regs;
     struct vmcs_struct *vmcs;

-    context = &full_context->cpu_ctxt;
+    regs = &ctxt->user_regs;

     /*
      * Create a new VMCS
@@ -333,7 +337,7 @@ static int vmx_final_setup_guest(struct

     ed->arch.arch_vmx.vmcs = vmcs;
     error = construct_vmcs(
-        &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
+        &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
     if ( error < 0 )
     {
         printk("Failed to construct a new VMCS\n");
@@ -345,7 +349,7 @@ static int vmx_final_setup_guest(struct

 #if defined (__i386)
     ed->arch.arch_vmx.vmx_platform.real_mode_data =
-        (unsigned long *) context->esi;
+        (unsigned long *) regs->esi;
 #endif

     if (ed == ed->domain->exec_domain[0]) {
@@ -374,7 +378,7 @@ out:

 /* This is called by arch_final_setup_guest and do_boot_vcpu */
 int arch_set_info_guest(
-    struct exec_domain *ed, full_execution_context_t *c)
+    struct exec_domain *ed, struct vcpu_guest_context *c)
 {
     struct domain *d = ed->domain;
     unsigned long phys_basetab;
@@ -385,65 +389,42 @@ int arch_set_info_guest(
      *  #GP. If DS, ES, FS, GS are DPL 0 then they'll be cleared automatically.
      *  If SS RPL or DPL differs from CS RPL then we'll #GP.
      */
-    if (!(c->flags & ECF_VMX_GUEST))
-        if ( ((c->cpu_ctxt.cs & 3) == 0) ||
-             ((c->cpu_ctxt.ss & 3) == 0) )
+    if ( !(c->flags & VGCF_VMX_GUEST) )
+    {
+        if ( ((c->user_regs.cs & 3) == 0) ||
+             ((c->user_regs.ss & 3) == 0) )
             return -EINVAL;
+    }

     clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
-    if ( c->flags & ECF_I387_VALID )
+    if ( c->flags & VGCF_I387_VALID )
         set_bit(EDF_DONEFPUINIT, &ed->ed_flags);

     ed->arch.flags &= ~TF_kernel_mode;
-    if ( c->flags & ECF_IN_KERNEL )
+    if ( c->flags & VGCF_IN_KERNEL )
         ed->arch.flags |= TF_kernel_mode;

-    memcpy(&ed->arch.user_ctxt,
-           &c->cpu_ctxt,
-           sizeof(ed->arch.user_ctxt));
-
-    memcpy(&ed->arch.i387,
-           &c->fpu_ctxt,
-           sizeof(ed->arch.i387));
+    memcpy(&ed->arch.guest_context, c, sizeof(*c));

     /* IOPL privileges are virtualised. */
-    ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3;
-    ed->arch.user_ctxt.eflags &= ~EF_IOPL;
+    ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
+    ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;

     /* Clear IOPL for unprivileged domains. */
-    if (!IS_PRIV(d))
-        ed->arch.user_ctxt.eflags &= 0xffffcfff;
+    if ( !IS_PRIV(d) )
+        ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;

-    if (test_bit(EDF_DONEINIT, &ed->ed_flags))
+    if ( test_bit(EDF_DONEINIT, &ed->ed_flags) )
         return 0;

-    memcpy(ed->arch.traps,
-           &c->trap_ctxt,
-           sizeof(ed->arch.traps));
-
     if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
         return rc;

-    ed->arch.ldt_base = c->ldt_base;
-    ed->arch.ldt_ents = c->ldt_ents;
-
-    ed->arch.kernel_ss = c->kernel_ss;
-    ed->arch.kernel_sp = c->kernel_esp;
-
+    memset(ed->arch.guest_context.debugreg, 0,
+           sizeof(ed->arch.guest_context.debugreg));
     for ( i = 0; i < 8; i++ )
         (void)set_debugreg(ed, i, c->debugreg[i]);

-#if defined(__i386__)
-    ed->arch.event_selector    = c->event_callback_cs;
-    ed->arch.event_address     = c->event_callback_eip;
-    ed->arch.failsafe_selector = c->failsafe_callback_cs;
-    ed->arch.failsafe_address  = c->failsafe_callback_eip;
-#elif defined(__x86_64__)
-    ed->arch.event_address     = c->event_callback_eip;
-    ed->arch.failsafe_address  = c->failsafe_callback_eip;
-    ed->arch.syscall_address   = c->syscall_callback_eip;
-#endif
-
     if ( ed->eid == 0 )
         d->vm_assist = c->vm_assist;
@@ -475,7 +456,7 @@ int arch_set_info_guest(
     }

 #ifdef CONFIG_VMX
-    if ( c->flags & ECF_VMX_GUEST )
+    if ( c->flags & VGCF_VMX_GUEST )
     {
         int error;

@@ -507,7 +488,7 @@ void new_thread(struct exec_domain *d,
                 unsigned long start_stack,
                 unsigned long start_info)
 {
-    execution_context_t *ec = &d->arch.user_ctxt;
+    struct cpu_user_regs *regs = &d->arch.guest_context.user_regs;

     /*
      * Initial register values:
@@ -517,15 +498,15 @@ void new_thread(struct exec_domain *d,
      *          ESI = start_info
      *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
      */
-    ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS;
-    ec->ss = FLAT_KERNEL_SS;
-    ec->cs = FLAT_KERNEL_CS;
-    ec->eip = start_pc;
-    ec->esp = start_stack;
-    ec->esi = start_info;
+    regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
+    regs->ss = FLAT_KERNEL_SS;
+    regs->cs = FLAT_KERNEL_CS;
+    regs->eip = start_pc;
+    regs->esp = start_stack;
+    regs->esi = start_info;

-    __save_flags(ec->eflags);
-    ec->eflags |= X86_EFLAGS_IF;
+    __save_flags(regs->eflags);
+    regs->eflags |= X86_EFLAGS_IF;
 }


@@ -557,63 +538,63 @@ void toggle_guest_mode(struct exec_domai

 static void load_segments(struct exec_domain *p, struct exec_domain *n)
 {
+    struct vcpu_guest_context *pctxt = &p->arch.guest_context;
+    struct vcpu_guest_context *nctxt = &n->arch.guest_context;
     int all_segs_okay = 1;

     /* Either selector != 0 ==> reload. */
-    if ( unlikely(p->arch.user_ctxt.ds |
-                  n->arch.user_ctxt.ds) )
-        all_segs_okay &= loadsegment(ds, n->arch.user_ctxt.ds);
+    if ( unlikely(pctxt->user_regs.ds | nctxt->user_regs.ds) )
+        all_segs_okay &= loadsegment(ds, nctxt->user_regs.ds);

     /* Either selector != 0 ==> reload. */
-    if ( unlikely(p->arch.user_ctxt.es |
-                  n->arch.user_ctxt.es) )
-        all_segs_okay &= loadsegment(es, n->arch.user_ctxt.es);
+    if ( unlikely(pctxt->user_regs.es | nctxt->user_regs.es) )
+        all_segs_okay &= loadsegment(es, nctxt->user_regs.es);

     /*
      * Either selector != 0 ==> reload.
      * Also reload to reset FS_BASE if it was non-zero.
      */
-    if ( unlikely(p->arch.user_ctxt.fs |
-                  p->arch.user_ctxt.fs_base |
-                  n->arch.user_ctxt.fs) )
+    if ( unlikely(pctxt->user_regs.fs |
+                  pctxt->fs_base |
+                  nctxt->user_regs.fs) )
     {
-        all_segs_okay &= loadsegment(fs, n->arch.user_ctxt.fs);
-        if ( p->arch.user_ctxt.fs ) /* != 0 selector kills fs_base */
-            p->arch.user_ctxt.fs_base = 0;
+        all_segs_okay &= loadsegment(fs, nctxt->user_regs.fs);
+        if ( pctxt->user_regs.fs ) /* != 0 selector kills fs_base */
+            pctxt->fs_base = 0;
     }

     /*
      * Either selector != 0 ==> reload.
      * Also reload to reset GS_BASE if it was non-zero.
      */
-    if ( unlikely(p->arch.user_ctxt.gs |
-                  p->arch.user_ctxt.gs_base_user |
-                  n->arch.user_ctxt.gs) )
+    if ( unlikely(pctxt->user_regs.gs |
+                  pctxt->gs_base_user |
+                  nctxt->user_regs.gs) )
     {
         /* Reset GS_BASE with user %gs? */
-        if ( p->arch.user_ctxt.gs || !n->arch.user_ctxt.gs_base_user )
-            all_segs_okay &= loadsegment(gs, n->arch.user_ctxt.gs);
-        if ( p->arch.user_ctxt.gs ) /* != 0 selector kills gs_base_user */
-            p->arch.user_ctxt.gs_base_user = 0;
+        if ( pctxt->user_regs.gs || !nctxt->gs_base_user )
+            all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
+        if ( pctxt->user_regs.gs ) /* != 0 selector kills gs_base_user */
+            pctxt->gs_base_user = 0;
     }

     /* This can only be non-zero if selector is NULL. */
-    if ( n->arch.user_ctxt.fs_base )
+    if ( nctxt->fs_base )
         wrmsr(MSR_FS_BASE,
-              n->arch.user_ctxt.fs_base,
-              n->arch.user_ctxt.fs_base>>32);
+              nctxt->fs_base,
+              nctxt->fs_base>>32);

     /* Most kernels have non-zero GS base, so don't bother testing. */
     /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
     wrmsr(MSR_SHADOW_GS_BASE,
-          n->arch.user_ctxt.gs_base_kernel,
-          n->arch.user_ctxt.gs_base_kernel>>32);
+          nctxt->gs_base_kernel,
+          nctxt->gs_base_kernel>>32);

     /* This can only be non-zero if selector is NULL. */
-    if ( n->arch.user_ctxt.gs_base_user )
+    if ( nctxt->gs_base_user )
         wrmsr(MSR_GS_BASE,
-              n->arch.user_ctxt.gs_base_user,
-              n->arch.user_ctxt.gs_base_user>>32);
+              nctxt->gs_base_user,
+              nctxt->gs_base_user>>32);

     /* If in kernel mode then switch the GS bases around. */
     if ( n->arch.flags & TF_kernel_mode )
@@ -621,28 +602,28 @@ static void load_segments(struct exec_do

     if ( unlikely(!all_segs_okay) )
     {
-        struct xen_regs *regs = get_execution_context();
+        struct cpu_user_regs *regs = get_cpu_user_regs();
         unsigned long *rsp =
             (n->arch.flags & TF_kernel_mode) ?
             (unsigned long *)regs->rsp :
-            (unsigned long *)n->arch.kernel_sp;
+            (unsigned long *)nctxt->kernel_sp;

         if ( !(n->arch.flags & TF_kernel_mode) )
             toggle_guest_mode(n);
         else
             regs->cs &= ~3;

-        if ( put_user(regs->ss,            rsp- 1) |
-             put_user(regs->rsp,           rsp- 2) |
-             put_user(regs->rflags,        rsp- 3) |
-             put_user(regs->cs,            rsp- 4) |
-             put_user(regs->rip,           rsp- 5) |
-             put_user(n->arch.user_ctxt.gs, rsp- 6) |
-             put_user(n->arch.user_ctxt.fs, rsp- 7) |
-             put_user(n->arch.user_ctxt.es, rsp- 8) |
-             put_user(n->arch.user_ctxt.ds, rsp- 9) |
-             put_user(regs->r11,           rsp-10) |
-             put_user(regs->rcx,           rsp-11) )
+        if ( put_user(regs->ss,            rsp- 1) |
+             put_user(regs->rsp,           rsp- 2) |
+             put_user(regs->rflags,        rsp- 3) |
+             put_user(regs->cs,            rsp- 4) |
+             put_user(regs->rip,           rsp- 5) |
+             put_user(nctxt->user_regs.gs, rsp- 6) |
+             put_user(nctxt->user_regs.fs, rsp- 7) |
+             put_user(nctxt->user_regs.es, rsp- 8) |
+             put_user(nctxt->user_regs.ds, rsp- 9) |
+             put_user(regs->r11,           rsp-10) |
+             put_user(regs->rcx,           rsp-11) )
         {
             DPRINTK("Error while creating failsafe callback frame.\n");
             domain_crash();
@@ -653,16 +634,17 @@ static void load_segments(struct exec_do
         regs->ss            = __GUEST_SS;
         regs->rsp           = (unsigned long)(rsp-11);
         regs->cs            = __GUEST_CS;
-        regs->rip           = n->arch.failsafe_address;
+        regs->rip           = nctxt->failsafe_callback_eip;
     }
 }

-static void save_segments(struct exec_domain *p)
+static void save_segments(struct exec_domain *ed)
 {
-    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
-    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
-    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
-    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
+    struct cpu_user_regs *regs = &ed->arch.guest_context.user_regs;
+    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (regs->ds) );
+    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (regs->es) );
+    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (regs->fs) );
+    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (regs->gs) );
 }

 static void clear_segments(void)
@@ -679,7 +661,7 @@ static void clear_segments(void)

 long do_switch_to_user(void)
 {
-    struct xen_regs       *regs = get_execution_context();
+    struct cpu_user_regs  *regs = get_cpu_user_regs();
     struct switch_to_user  stu;
    struct exec_domain    *ed = current;

@@ -695,7 +677,7 @@ long do_switch_to_user(void)
     regs->rsp    = stu.rsp;
     regs->ss     = stu.ss | 3; /* force guest privilege */

-    if ( !(stu.flags & ECF_IN_SYSCALL) )
+    if ( !(stu.flags & VGCF_IN_SYSCALL) )
     {
         regs->entry_vector = 0;
         regs->r11 = stu.r11;
@@ -717,8 +699,8 @@ long do_switch_to_user(void)
 static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
 {
     struct tss_struct *tss = &init_tss[cpu];
-    tss->esp1 = n->arch.kernel_sp;
-    tss->ss1  = n->arch.kernel_ss;
+    tss->esp1 = n->arch.guest_context.kernel_sp;
+    tss->ss1  = n->arch.guest_context.kernel_ss;
 }

 #endif
@@ -728,15 +710,15 @@ static inline void switch_kernel_stack(s

 static void __context_switch(void)
 {
-    execution_context_t *stack_ec = get_execution_context();
+    struct cpu_user_regs *stack_regs = get_cpu_user_regs();
     unsigned int         cpu = smp_processor_id();
     struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
     struct exec_domain  *n = current;

     if ( !is_idle_task(p->domain) )
     {
-        memcpy(&p->arch.user_ctxt,
-               stack_ec,
+        memcpy(&p->arch.guest_context.user_regs,
+               stack_regs,
                CTXT_SWITCH_STACK_BYTES);
         unlazy_fpu(p);
         CLEAR_FAST_TRAP(&p->arch);
@@ -745,20 +727,20 @@ static void __context_switch(void)

     if ( !is_idle_task(n->domain) )
     {
-        memcpy(stack_ec,
-               &n->arch.user_ctxt,
+        memcpy(stack_regs,
+               &n->arch.guest_context.user_regs,
                CTXT_SWITCH_STACK_BYTES);

         /* Maybe switch the debug registers. */
-        if ( unlikely(n->arch.debugreg[7]) )
+        if ( unlikely(n->arch.guest_context.debugreg[7]) )
         {
-            loaddebug(&n->arch, 0);
-            loaddebug(&n->arch, 1);
-            loaddebug(&n->arch, 2);
-            loaddebug(&n->arch, 3);
+            loaddebug(&n->arch.guest_context, 0);
+            loaddebug(&n->arch.guest_context, 1);
+            loaddebug(&n->arch.guest_context, 2);
+            loaddebug(&n->arch.guest_context, 3);
             /* no 4 and 5 */
-            loaddebug(&n->arch, 6);
-            loaddebug(&n->arch, 7);
+            loaddebug(&n->arch.guest_context, 6);
+            loaddebug(&n->arch.guest_context, 7);
         }

         if ( !VMX_DOMAIN(n) )
@@ -816,7 +798,12 @@ void context_switch(struct exec_domain *
     clear_bit(EDF_RUNNING, &prev->ed_flags);

     schedule_tail(next);
+    BUG();
+}

+void continue_running(struct exec_domain *same)
+{
+    schedule_tail(same);
     BUG();
 }

@@ -844,7 +831,7 @@ unsigned long __hypercall_create_continu
     unsigned int op, unsigned int nr_args, ...)
52.514 { 52.515 struct mc_state *mcs = &mc_state[smp_processor_id()]; 52.516 - execution_context_t *ec; 52.517 + struct cpu_user_regs *regs; 52.518 unsigned int i; 52.519 va_list args; 52.520 52.521 @@ -859,37 +846,37 @@ unsigned long __hypercall_create_continu 52.522 } 52.523 else 52.524 { 52.525 - ec = get_execution_context(); 52.526 + regs = get_cpu_user_regs(); 52.527 #if defined(__i386__) 52.528 - ec->eax = op; 52.529 - ec->eip -= 2; /* re-execute 'int 0x82' */ 52.530 + regs->eax = op; 52.531 + regs->eip -= 2; /* re-execute 'int 0x82' */ 52.532 52.533 for ( i = 0; i < nr_args; i++ ) 52.534 { 52.535 switch ( i ) 52.536 { 52.537 - case 0: ec->ebx = va_arg(args, unsigned long); break; 52.538 - case 1: ec->ecx = va_arg(args, unsigned long); break; 52.539 - case 2: ec->edx = va_arg(args, unsigned long); break; 52.540 - case 3: ec->esi = va_arg(args, unsigned long); break; 52.541 - case 4: ec->edi = va_arg(args, unsigned long); break; 52.542 - case 5: ec->ebp = va_arg(args, unsigned long); break; 52.543 + case 0: regs->ebx = va_arg(args, unsigned long); break; 52.544 + case 1: regs->ecx = va_arg(args, unsigned long); break; 52.545 + case 2: regs->edx = va_arg(args, unsigned long); break; 52.546 + case 3: regs->esi = va_arg(args, unsigned long); break; 52.547 + case 4: regs->edi = va_arg(args, unsigned long); break; 52.548 + case 5: regs->ebp = va_arg(args, unsigned long); break; 52.549 } 52.550 } 52.551 #elif defined(__x86_64__) 52.552 - ec->rax = op; 52.553 - ec->rip -= 2; /* re-execute 'syscall' */ 52.554 + regs->rax = op; 52.555 + regs->rip -= 2; /* re-execute 'syscall' */ 52.556 52.557 for ( i = 0; i < nr_args; i++ ) 52.558 { 52.559 switch ( i ) 52.560 { 52.561 - case 0: ec->rdi = va_arg(args, unsigned long); break; 52.562 - case 1: ec->rsi = va_arg(args, unsigned long); break; 52.563 - case 2: ec->rdx = va_arg(args, unsigned long); break; 52.564 - case 3: ec->r10 = va_arg(args, unsigned long); break; 52.565 - case 4: ec->r8 = va_arg(args, unsigned long); break; 52.566 - case 5: ec->r9 = va_arg(args, unsigned long); break; 52.567 + case 0: regs->rdi = va_arg(args, unsigned long); break; 52.568 + case 1: regs->rsi = va_arg(args, unsigned long); break; 52.569 + case 2: regs->rdx = va_arg(args, unsigned long); break; 52.570 + case 3: regs->r10 = va_arg(args, unsigned long); break; 52.571 + case 4: regs->r8 = va_arg(args, unsigned long); break; 52.572 + case 5: regs->r9 = va_arg(args, unsigned long); break; 52.573 } 52.574 } 52.575 #endif
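The tail of this hunk shows how __hypercall_create_continuation re-arms a preempted hypercall: the hypercall number goes back into EAX/RAX, the saved instruction pointer is rewound two bytes so the guest re-executes the two-byte 'int 0x82' (or 'syscall') that entered the hypervisor, and the argument registers are refilled from the varargs. A minimal standalone sketch of that pattern, using an invented struct fake_regs rather than Xen's struct cpu_user_regs:

    /* Illustrative only: a stripped-down model of the i386 continuation
     * path.  struct fake_regs and create_continuation() are invented for
     * this sketch; the real code operates on struct cpu_user_regs. */
    #include <stdarg.h>

    struct fake_regs { unsigned long eax, ebx, ecx, edx, esi, edi, ebp, eip; };

    static void create_continuation(struct fake_regs *regs,
                                    unsigned int op, unsigned int nr_args, ...)
    {
        va_list args;
        unsigned int i;

        va_start(args, nr_args);
        regs->eax  = op;   /* hypercall number goes back into EAX */
        regs->eip -= 2;    /* 'int 0x82' is two bytes: re-execute it */
        for ( i = 0; i < nr_args; i++ )
        {
            unsigned long arg = va_arg(args, unsigned long);
            switch ( i )
            {
            case 0: regs->ebx = arg; break;
            case 1: regs->ecx = arg; break;
            case 2: regs->edx = arg; break;
            case 3: regs->esi = arg; break;
            case 4: regs->edi = arg; break;
            case 5: regs->ebp = arg; break;
            }
        }
        va_end(args);
    }

When the guest is next scheduled it simply re-enters the hypercall with the (possibly updated) arguments, which is how long-running operations are made preemptible.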
53.1 --- a/xen/arch/x86/domain_build.c Thu Apr 28 13:54:01 2005 +0000 53.2 +++ b/xen/arch/x86/domain_build.c Fri Apr 29 07:34:47 2005 +0000 53.3 @@ -222,14 +222,15 @@ int construct_dom0(struct domain *d, 53.4 * We're basically forcing default RPLs to 1, so that our "what privilege 53.5 * level are we returning to?" logic works. 53.6 */ 53.7 - ed->arch.failsafe_selector = FLAT_KERNEL_CS; 53.8 - ed->arch.event_selector = FLAT_KERNEL_CS; 53.9 - ed->arch.kernel_ss = FLAT_KERNEL_SS; 53.10 + ed->arch.guest_context.kernel_ss = FLAT_KERNEL_SS; 53.11 for ( i = 0; i < 256; i++ ) 53.12 - ed->arch.traps[i].cs = FLAT_KERNEL_CS; 53.13 + ed->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS; 53.14 53.15 #if defined(__i386__) 53.16 53.17 + ed->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS; 53.18 + ed->arch.guest_context.event_callback_cs = FLAT_KERNEL_CS; 53.19 + 53.20 /* 53.21 * Protect the lowest 1GB of memory. We use a temporary mapping there 53.22 * from which we copy the kernel and ramdisk images.
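The construct_dom0 change seeds all 256 trap vectors with FLAT_KERNEL_CS, and the "forcing default RPLs to 1" comment ties back to the (cs & 3) check in arch_set_info_guest above: the low two bits of an x86 segment selector are its Requested Privilege Level, and a paravirtual guest kernel must run above ring 0. A tiny illustration of that bit-twiddling:

    /* The low two bits of an x86 segment selector are its RPL. */
    static inline unsigned int selector_rpl(unsigned int sel)
    {
        return sel & 3;    /* 0 = ring 0 ... 3 = ring 3 */
    }

    /* e.g. selector_rpl(0x0819) == 1, so a ((sel & 3) == 0) test is
     * exactly a "this selector targets ring 0" check. */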
54.1 --- a/xen/arch/x86/extable.c Thu Apr 28 13:54:01 2005 +0000 54.2 +++ b/xen/arch/x86/extable.c Fri Apr 29 07:34:47 2005 +0000 54.3 @@ -68,7 +68,7 @@ search_exception_table(unsigned long add 54.4 } 54.5 54.6 unsigned long 54.7 -search_pre_exception_table(struct xen_regs *regs) 54.8 +search_pre_exception_table(struct cpu_user_regs *regs) 54.9 { 54.10 unsigned long addr = (unsigned long)regs->eip; 54.11 unsigned long fixup = search_one_table(
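search_pre_exception_table resolves the faulting EIP against a table of (faulting instruction, fixup) pairs; search_one_table, not shown in this hunk, is conventionally a binary search over entries sorted by address. A self-contained sketch of that lookup, with a simplified two-field entry standing in for the real exception-table layout:

    /* Sketch of the lookup search_one_table() performs.  The entry
     * layout is simplified; only the idea (sorted table, binary
     * search, fixup address as the result) is taken from the code. */
    struct ex_entry { unsigned long insn, fixup; };

    static unsigned long
    find_fixup(const struct ex_entry *tab, long n, unsigned long addr)
    {
        long lo = 0, hi = n - 1;
        while ( lo <= hi )
        {
            long mid = (lo + hi) / 2;
            if ( tab[mid].insn == addr )
                return tab[mid].fixup;   /* resume here instead of dying */
            if ( tab[mid].insn < addr )
                lo = mid + 1;
            else
                hi = mid - 1;
        }
        return 0;                        /* no fixup: the fault is fatal */
    }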
55.1 --- a/xen/arch/x86/i387.c Thu Apr 28 13:54:01 2005 +0000 55.2 +++ b/xen/arch/x86/i387.c Fri Apr 29 07:34:47 2005 +0000 55.3 @@ -34,11 +34,11 @@ void save_init_fpu(struct exec_domain *t 55.4 if ( cpu_has_fxsr ) 55.5 __asm__ __volatile__ ( 55.6 "fxsave %0 ; fnclex" 55.7 - : "=m" (tsk->arch.i387) ); 55.8 + : "=m" (tsk->arch.guest_context.fpu_ctxt) ); 55.9 else 55.10 __asm__ __volatile__ ( 55.11 "fnsave %0 ; fwait" 55.12 - : "=m" (tsk->arch.i387) ); 55.13 + : "=m" (tsk->arch.guest_context.fpu_ctxt) ); 55.14 55.15 clear_bit(EDF_USEDFPU, &tsk->ed_flags); 55.16 stts(); 55.17 @@ -46,14 +46,38 @@ void save_init_fpu(struct exec_domain *t 55.18 55.19 void restore_fpu(struct exec_domain *tsk) 55.20 { 55.21 + /* 55.22 + * FXRSTOR can fault if passed a corrupted data block. We handle this 55.23 + * possibility, which may occur if the block was passed to us by control 55.24 + * tools, by silently clearing the block. 55.25 + */ 55.26 if ( cpu_has_fxsr ) 55.27 __asm__ __volatile__ ( 55.28 - "fxrstor %0" 55.29 - : : "m" (tsk->arch.i387) ); 55.30 + "1: fxrstor %0 \n" 55.31 + ".section .fixup,\"ax\" \n" 55.32 + "2: push %%"__OP"ax \n" 55.33 + " push %%"__OP"cx \n" 55.34 + " push %%"__OP"di \n" 55.35 + " lea %0,%%"__OP"di \n" 55.36 + " mov %1,%%ecx \n" 55.37 + " xor %%eax,%%eax \n" 55.38 + " rep ; stosl \n" 55.39 + " pop %%"__OP"di \n" 55.40 + " pop %%"__OP"cx \n" 55.41 + " pop %%"__OP"ax \n" 55.42 + " jmp 1b \n" 55.43 + ".previous \n" 55.44 + ".section __ex_table,\"a\"\n" 55.45 + " "__FIXUP_ALIGN" \n" 55.46 + " "__FIXUP_WORD" 1b,2b \n" 55.47 + ".previous \n" 55.48 + : 55.49 + : "m" (tsk->arch.guest_context.fpu_ctxt), 55.50 + "i" (sizeof(tsk->arch.guest_context.fpu_ctxt)/4) ); 55.51 else 55.52 __asm__ __volatile__ ( 55.53 "frstor %0" 55.54 - : : "m" (tsk->arch.i387) ); 55.55 + : : "m" (tsk->arch.guest_context.fpu_ctxt) ); 55.56 } 55.57 55.58 /*
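The new restore_fpu comment and fixup section implement a defensive pattern: FXRSTOR faults on a malformed save image (one possibly handed over by the control tools), so the fault is caught through the __ex_table machinery, the save area is zeroed (an all-zero FXSAVE image is a valid, empty FPU state), and the instruction is retried. The same flow expressed as plain C, with try_fxrstor() as a hypothetical stand-in that reports failure instead of faulting:

    /* C-level sketch of the fixup flow the inline assembly implements.
     * try_fxrstor() is hypothetical; in the hypervisor the retry is
     * reached via the __ex_table fixup, not a C-level branch. */
    #include <string.h>

    struct fpu_ctxt { unsigned char regs[512]; };

    extern int try_fxrstor(const struct fpu_ctxt *fpu);   /* hypothetical */

    static void restore_fpu_sketch(struct fpu_ctxt *fpu)
    {
        if ( try_fxrstor(fpu) )
            return;
        /* FXRSTOR faulted: the image was corrupt.  Silently clear it
         * and retry; a zeroed block always restores cleanly. */
        memset(fpu, 0, sizeof(*fpu));
        (void)try_fxrstor(fpu);
    }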
56.1 --- a/xen/arch/x86/irq.c Thu Apr 28 13:54:01 2005 +0000 56.2 +++ b/xen/arch/x86/irq.c Fri Apr 29 07:34:47 2005 +0000 56.3 @@ -17,7 +17,7 @@ irq_desc_t irq_desc[NR_IRQS]; 56.4 56.5 static void __do_IRQ_guest(int irq); 56.6 56.7 -void no_action(int cpl, void *dev_id, struct xen_regs *regs) { } 56.8 +void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { } 56.9 56.10 static void enable_none(unsigned int irq) { } 56.11 static unsigned int startup_none(unsigned int irq) { return 0; } 56.12 @@ -87,7 +87,7 @@ void enable_irq(unsigned int irq) 56.13 spin_unlock_irqrestore(&desc->lock, flags); 56.14 } 56.15 56.16 -asmlinkage void do_IRQ(struct xen_regs *regs) 56.17 +asmlinkage void do_IRQ(struct cpu_user_regs *regs) 56.18 { 56.19 unsigned int irq = regs->entry_vector; 56.20 irq_desc_t *desc = &irq_desc[irq];
57.1 --- a/xen/arch/x86/mm.c Thu Apr 28 13:54:01 2005 +0000 57.2 +++ b/xen/arch/x86/mm.c Fri Apr 29 07:34:47 2005 +0000 57.3 @@ -285,7 +285,7 @@ int map_ldt_shadow_page(unsigned int off 57.4 struct domain *d = ed->domain; 57.5 unsigned long gpfn, gmfn; 57.6 l1_pgentry_t l1e, nl1e; 57.7 - unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT); 57.8 + unsigned gva = ed->arch.guest_context.ldt_base + (off << PAGE_SHIFT); 57.9 int res; 57.10 57.11 #if defined(__x86_64__) 57.12 @@ -1639,12 +1639,12 @@ int do_mmuext_op( 57.13 okay = 0; 57.14 MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents); 57.15 } 57.16 - else if ( (ed->arch.ldt_ents != ents) || 57.17 - (ed->arch.ldt_base != ptr) ) 57.18 + else if ( (ed->arch.guest_context.ldt_ents != ents) || 57.19 + (ed->arch.guest_context.ldt_base != ptr) ) 57.20 { 57.21 invalidate_shadow_ldt(ed); 57.22 - ed->arch.ldt_base = ptr; 57.23 - ed->arch.ldt_ents = ents; 57.24 + ed->arch.guest_context.ldt_base = ptr; 57.25 + ed->arch.guest_context.ldt_ents = ents; 57.26 load_LDT(ed); 57.27 percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT; 57.28 if ( ents != 0 ) 57.29 @@ -2842,7 +2842,7 @@ int ptwr_do_page_fault(struct domain *d, 57.30 return EXCRET_fault_fixed; 57.31 57.32 emulate: 57.33 - if ( x86_emulate_memop(get_execution_context(), addr, 57.34 + if ( x86_emulate_memop(get_cpu_user_regs(), addr, 57.35 &ptwr_mem_emulator, BITS_PER_LONG/8) ) 57.36 return 0; 57.37 perfc_incrc(ptwr_emulations);
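The SET_LDT path above only pays for invalidate_shadow_ldt() and a reload when the guest actually supplies a different base or entry count; resubmitting the current LDT is a no-op. The shape of that fast path, with stub helpers standing in for Xen's:

    /* Sketch of the SET_LDT fast path.  The struct and the two stub
     * functions are stand-ins invented for this illustration. */
    struct ldt_state { unsigned long base, ents; };

    extern void invalidate_shadow_ldt_stub(void);  /* hypothetical */
    extern void load_ldt_stub(void);               /* hypothetical */

    static void set_ldt_sketch(struct ldt_state *s,
                               unsigned long ptr, unsigned long ents)
    {
        if ( (s->ents == ents) && (s->base == ptr) )
            return;                     /* unchanged: keep the shadow */
        invalidate_shadow_ldt_stub();   /* drop old shadow mappings */
        s->base = ptr;
        s->ents = ents;
        load_ldt_stub();                /* activate the new table */
    }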
58.1 --- a/xen/arch/x86/nmi.c Thu Apr 28 13:54:01 2005 +0000 58.2 +++ b/xen/arch/x86/nmi.c Fri Apr 29 07:34:47 2005 +0000 58.3 @@ -267,7 +267,7 @@ void touch_nmi_watchdog (void) 58.4 alert_counter[i] = 0; 58.5 } 58.6 58.7 -void nmi_watchdog_tick (struct xen_regs * regs) 58.8 +void nmi_watchdog_tick (struct cpu_user_regs * regs) 58.9 { 58.10 int sum, cpu = smp_processor_id(); 58.11
59.1 --- a/xen/arch/x86/shadow.c Thu Apr 28 13:54:01 2005 +0000 59.2 +++ b/xen/arch/x86/shadow.c Fri Apr 29 07:34:47 2005 +0000 59.3 @@ -2421,7 +2421,7 @@ void __shadow_sync_all(struct domain *d) 59.4 free_out_of_sync_state(d); 59.5 } 59.6 59.7 -int shadow_fault(unsigned long va, struct xen_regs *regs) 59.8 +int shadow_fault(unsigned long va, struct cpu_user_regs *regs) 59.9 { 59.10 l1_pgentry_t gpte, spte, orig_gpte; 59.11 struct exec_domain *ed = current;
60.1 --- a/xen/arch/x86/time.c Thu Apr 28 13:54:01 2005 +0000 60.2 +++ b/xen/arch/x86/time.c Fri Apr 29 07:34:47 2005 +0000 60.3 @@ -51,7 +51,7 @@ static s_time_t stime_irq; 60.4 static unsigned long wc_sec, wc_usec; /* UTC time at last 'time update'. */ 60.5 static rwlock_t time_lock = RW_LOCK_UNLOCKED; 60.6 60.7 -void timer_interrupt(int irq, void *dev_id, struct xen_regs *regs) 60.8 +void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) 60.9 { 60.10 write_lock_irq(&time_lock); 60.11
61.1 --- a/xen/arch/x86/trampoline.S Thu Apr 28 13:54:01 2005 +0000 61.2 +++ b/xen/arch/x86/trampoline.S Fri Apr 29 07:34:47 2005 +0000 61.3 @@ -60,8 +60,7 @@ gdt_48: 61.4 #else 61.5 .long 0x100200 # gdt_table 61.6 #endif 61.7 - 61.8 -.globl SYMBOL_NAME(trampoline_end) 61.9 -SYMBOL_NAME_LABEL(trampoline_end) 61.10 + 61.11 +ENTRY(trampoline_end) 61.12 61.13 #endif /* CONFIG_SMP */
62.1 --- a/xen/arch/x86/traps.c Thu Apr 28 13:54:01 2005 +0000 62.2 +++ b/xen/arch/x86/traps.c Fri Apr 29 07:34:47 2005 +0000 62.3 @@ -95,7 +95,7 @@ asmlinkage void machine_check(void); 62.4 * are disabled). In such situations we can't do much that is safe. We try to 62.5 * print out some tracing and then we just spin. 62.6 */ 62.7 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs) 62.8 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs) 62.9 { 62.10 int cpu = smp_processor_id(); 62.11 unsigned long cr2; 62.12 @@ -136,7 +136,7 @@ asmlinkage void fatal_trap(int trapnr, s 62.13 } 62.14 62.15 static inline int do_trap(int trapnr, char *str, 62.16 - struct xen_regs *regs, 62.17 + struct cpu_user_regs *regs, 62.18 int use_error_code) 62.19 { 62.20 struct exec_domain *ed = current; 62.21 @@ -150,11 +150,12 @@ static inline int do_trap(int trapnr, ch 62.22 goto xen_fault; 62.23 62.24 #ifndef NDEBUG 62.25 - if ( (ed->arch.traps[trapnr].address == 0) && (ed->domain->id == 0) ) 62.26 + if ( (ed->arch.guest_context.trap_ctxt[trapnr].address == 0) && 62.27 + (ed->domain->id == 0) ) 62.28 goto xen_fault; 62.29 #endif 62.30 62.31 - ti = current->arch.traps + trapnr; 62.32 + ti = &current->arch.guest_context.trap_ctxt[trapnr]; 62.33 tb->flags = TBF_EXCEPTION; 62.34 tb->cs = ti->cs; 62.35 tb->eip = ti->address; 62.36 @@ -186,13 +187,13 @@ static inline int do_trap(int trapnr, ch 62.37 } 62.38 62.39 #define DO_ERROR_NOCODE(trapnr, str, name) \ 62.40 -asmlinkage int do_##name(struct xen_regs *regs) \ 62.41 +asmlinkage int do_##name(struct cpu_user_regs *regs) \ 62.42 { \ 62.43 return do_trap(trapnr, str, regs, 0); \ 62.44 } 62.45 62.46 #define DO_ERROR(trapnr, str, name) \ 62.47 -asmlinkage int do_##name(struct xen_regs *regs) \ 62.48 +asmlinkage int do_##name(struct cpu_user_regs *regs) \ 62.49 { \ 62.50 return do_trap(trapnr, str, regs, 1); \ 62.51 } 62.52 @@ -209,7 +210,7 @@ DO_ERROR_NOCODE(16, "fpu error", coproce 62.53 DO_ERROR(17, "alignment check", alignment_check) 62.54 DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error) 62.55 62.56 -asmlinkage int do_int3(struct xen_regs *regs) 62.57 +asmlinkage int do_int3(struct cpu_user_regs *regs) 62.58 { 62.59 struct exec_domain *ed = current; 62.60 struct trap_bounce *tb = &ed->arch.trap_bounce; 62.61 @@ -224,7 +225,7 @@ asmlinkage int do_int3(struct xen_regs * 62.62 panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id()); 62.63 } 62.64 62.65 - ti = current->arch.traps + 3; 62.66 + ti = &current->arch.guest_context.trap_ctxt[TRAP_int3]; 62.67 tb->flags = TBF_EXCEPTION; 62.68 tb->cs = ti->cs; 62.69 tb->eip = ti->address; 62.70 @@ -234,7 +235,7 @@ asmlinkage int do_int3(struct xen_regs * 62.71 return 0; 62.72 } 62.73 62.74 -asmlinkage void do_machine_check(struct xen_regs *regs) 62.75 +asmlinkage void do_machine_check(struct cpu_user_regs *regs) 62.76 { 62.77 fatal_trap(TRAP_machine_check, regs); 62.78 } 62.79 @@ -245,7 +246,7 @@ void propagate_page_fault(unsigned long 62.80 struct exec_domain *ed = current; 62.81 struct trap_bounce *tb = &ed->arch.trap_bounce; 62.82 62.83 - ti = ed->arch.traps + 14; 62.84 + ti = &ed->arch.guest_context.trap_ctxt[TRAP_page_fault]; 62.85 tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2; 62.86 tb->cr2 = addr; 62.87 tb->error_code = error_code; 62.88 @@ -257,7 +258,7 @@ void propagate_page_fault(unsigned long 62.89 ed->arch.guest_cr2 = addr; 62.90 } 62.91 62.92 -asmlinkage int do_page_fault(struct xen_regs *regs) 62.93 +asmlinkage int do_page_fault(struct cpu_user_regs *regs) 
62.94 { 62.95 unsigned long off, addr, fixup; 62.96 struct exec_domain *ed = current; 62.97 @@ -303,7 +304,8 @@ asmlinkage int do_page_fault(struct xen_ 62.98 } 62.99 62.100 if ( unlikely(addr >= LDT_VIRT_START(ed)) && 62.101 - (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) ) 62.102 + (addr < (LDT_VIRT_START(ed) + 62.103 + (ed->arch.guest_context.ldt_ents*LDT_ENTRY_SIZE))) ) 62.104 { 62.105 /* 62.106 * Copy a mapping from the guest's LDT, if it is valid. Otherwise we 62.107 @@ -312,7 +314,7 @@ asmlinkage int do_page_fault(struct xen_ 62.108 extern int map_ldt_shadow_page(unsigned int); 62.109 LOCK_BIGLOCK(d); 62.110 off = addr - LDT_VIRT_START(ed); 62.111 - addr = ed->arch.ldt_base + off; 62.112 + addr = ed->arch.guest_context.ldt_base + off; 62.113 ret = map_ldt_shadow_page(off >> PAGE_SHIFT); 62.114 UNLOCK_BIGLOCK(d); 62.115 if ( likely(ret) ) 62.116 @@ -323,7 +325,8 @@ asmlinkage int do_page_fault(struct xen_ 62.117 goto xen_fault; 62.118 62.119 #ifndef NDEBUG 62.120 - if ( (ed->arch.traps[TRAP_page_fault].address == 0) && (d->id == 0) ) 62.121 + if ( (ed->arch.guest_context.trap_ctxt[TRAP_page_fault].address == 0) && 62.122 + (d->id == 0) ) 62.123 goto xen_fault; 62.124 #endif 62.125 62.126 @@ -374,7 +377,7 @@ long do_fpu_taskswitch(int set) 62.127 /* Has the guest requested sufficient permission for this I/O access? */ 62.128 static inline int guest_io_okay( 62.129 unsigned int port, unsigned int bytes, 62.130 - struct exec_domain *ed, struct xen_regs *regs) 62.131 + struct exec_domain *ed, struct cpu_user_regs *regs) 62.132 { 62.133 u16 x; 62.134 #if defined(__x86_64__) 62.135 @@ -404,7 +407,7 @@ static inline int guest_io_okay( 62.136 /* Has the administrator granted sufficient permission for this I/O access? */ 62.137 static inline int admin_io_okay( 62.138 unsigned int port, unsigned int bytes, 62.139 - struct exec_domain *ed, struct xen_regs *regs) 62.140 + struct exec_domain *ed, struct cpu_user_regs *regs) 62.141 { 62.142 struct domain *d = ed->domain; 62.143 u16 x; 62.144 @@ -436,7 +439,7 @@ static inline int admin_io_okay( 62.145 goto read_fault; \ 62.146 eip += _size; (_type)_x; }) 62.147 62.148 -static int emulate_privileged_op(struct xen_regs *regs) 62.149 +static int emulate_privileged_op(struct cpu_user_regs *regs) 62.150 { 62.151 struct exec_domain *ed = current; 62.152 unsigned long *reg, eip = regs->eip; 62.153 @@ -743,7 +746,7 @@ static int emulate_privileged_op(struct 62.154 return EXCRET_fault_fixed; 62.155 } 62.156 62.157 -asmlinkage int do_general_protection(struct xen_regs *regs) 62.158 +asmlinkage int do_general_protection(struct cpu_user_regs *regs) 62.159 { 62.160 struct exec_domain *ed = current; 62.161 struct trap_bounce *tb = &ed->arch.trap_bounce; 62.162 @@ -781,7 +784,7 @@ asmlinkage int do_general_protection(str 62.163 if ( (regs->error_code & 3) == 2 ) 62.164 { 62.165 /* This fault must be due to <INT n> instruction. */ 62.166 - ti = current->arch.traps + (regs->error_code>>3); 62.167 + ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3]; 62.168 if ( PERMIT_SOFTINT(TI_GET_DPL(ti), ed, regs) ) 62.169 { 62.170 tb->flags = TBF_EXCEPTION; 62.171 @@ -803,13 +806,13 @@ asmlinkage int do_general_protection(str 62.172 #endif 62.173 62.174 #ifndef NDEBUG 62.175 - if ( (ed->arch.traps[TRAP_gp_fault].address == 0) && 62.176 + if ( (ed->arch.guest_context.trap_ctxt[TRAP_gp_fault].address == 0) && 62.177 (ed->domain->id == 0) ) 62.178 goto gp_in_kernel; 62.179 #endif 62.180 62.181 /* Pass on GPF as is. 
*/ 62.182 - ti = current->arch.traps + 13; 62.183 + ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault]; 62.184 tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; 62.185 tb->error_code = regs->error_code; 62.186 finish_propagation: 62.187 @@ -851,7 +854,7 @@ static void nmi_softirq(void) 62.188 send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR); 62.189 } 62.190 62.191 -asmlinkage void mem_parity_error(struct xen_regs *regs) 62.192 +asmlinkage void mem_parity_error(struct cpu_user_regs *regs) 62.193 { 62.194 /* Clear and disable the parity-error line. */ 62.195 outb((inb(0x61)&15)|4,0x61); 62.196 @@ -870,7 +873,7 @@ asmlinkage void mem_parity_error(struct 62.197 } 62.198 } 62.199 62.200 -asmlinkage void io_check_error(struct xen_regs *regs) 62.201 +asmlinkage void io_check_error(struct cpu_user_regs *regs) 62.202 { 62.203 /* Clear and disable the I/O-error line. */ 62.204 outb((inb(0x61)&15)|8,0x61); 62.205 @@ -896,7 +899,7 @@ static void unknown_nmi_error(unsigned c 62.206 printk("Do you have a strange power saving mode enabled?\n"); 62.207 } 62.208 62.209 -asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason) 62.210 +asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason) 62.211 { 62.212 ++nmi_count(smp_processor_id()); 62.213 62.214 @@ -911,32 +914,31 @@ asmlinkage void do_nmi(struct xen_regs * 62.215 unknown_nmi_error((unsigned char)(reason&0xff)); 62.216 } 62.217 62.218 -asmlinkage int math_state_restore(struct xen_regs *regs) 62.219 +asmlinkage int math_state_restore(struct cpu_user_regs *regs) 62.220 { 62.221 /* Prevent recursion. */ 62.222 clts(); 62.223 62.224 - if ( !test_bit(EDF_USEDFPU, &current->ed_flags) ) 62.225 + if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) ) 62.226 { 62.227 if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) ) 62.228 restore_fpu(current); 62.229 else 62.230 init_fpu(); 62.231 - set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */ 62.232 } 62.233 62.234 if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) ) 62.235 { 62.236 struct trap_bounce *tb = &current->arch.trap_bounce; 62.237 - tb->flags = TBF_EXCEPTION; 62.238 - tb->cs = current->arch.traps[7].cs; 62.239 - tb->eip = current->arch.traps[7].address; 62.240 + tb->flags = TBF_EXCEPTION; 62.241 + tb->cs = current->arch.guest_context.trap_ctxt[7].cs; 62.242 + tb->eip = current->arch.guest_context.trap_ctxt[7].address; 62.243 } 62.244 62.245 return EXCRET_fault_fixed; 62.246 } 62.247 62.248 -asmlinkage int do_debug(struct xen_regs *regs) 62.249 +asmlinkage int do_debug(struct cpu_user_regs *regs) 62.250 { 62.251 unsigned long condition; 62.252 struct exec_domain *ed = current; 62.253 @@ -946,7 +948,7 @@ asmlinkage int do_debug(struct xen_regs 62.254 62.255 /* Mask out spurious debug traps due to lazy DR7 setting */ 62.256 if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) && 62.257 - (ed->arch.debugreg[7] == 0) ) 62.258 + (ed->arch.guest_context.debugreg[7] == 0) ) 62.259 { 62.260 __asm__("mov %0,%%db7" : : "r" (0UL)); 62.261 goto out; 62.262 @@ -968,17 +970,17 @@ asmlinkage int do_debug(struct xen_regs 62.263 } 62.264 62.265 /* Save debug status register where guest OS can peek at it */ 62.266 - ed->arch.debugreg[6] = condition; 62.267 + ed->arch.guest_context.debugreg[6] = condition; 62.268 62.269 tb->flags = TBF_EXCEPTION; 62.270 - tb->cs = ed->arch.traps[1].cs; 62.271 - tb->eip = ed->arch.traps[1].address; 62.272 + tb->cs = ed->arch.guest_context.trap_ctxt[TRAP_debug].cs; 62.273 + tb->eip = ed->arch.guest_context.trap_ctxt[TRAP_debug].address; 62.274 62.275 out: 
62.276 return EXCRET_not_a_fault; 62.277 } 62.278 62.279 -asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs) 62.280 +asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs) 62.281 { 62.282 return EXCRET_not_a_fault; 62.283 } 62.284 @@ -1059,7 +1061,7 @@ void __init trap_init(void) 62.285 long do_set_trap_table(trap_info_t *traps) 62.286 { 62.287 trap_info_t cur; 62.288 - trap_info_t *dst = current->arch.traps; 62.289 + trap_info_t *dst = current->arch.guest_context.trap_ctxt; 62.290 long rc = 0; 62.291 62.292 LOCK_BIGLOCK(current->domain); 62.293 @@ -1163,7 +1165,7 @@ long set_debugreg(struct exec_domain *p, 62.294 return -EINVAL; 62.295 } 62.296 62.297 - p->arch.debugreg[reg] = value; 62.298 + p->arch.guest_context.debugreg[reg] = value; 62.299 return 0; 62.300 } 62.301 62.302 @@ -1175,7 +1177,7 @@ long do_set_debugreg(int reg, unsigned l 62.303 unsigned long do_get_debugreg(int reg) 62.304 { 62.305 if ( (reg < 0) || (reg > 7) ) return -EINVAL; 62.306 - return current->arch.debugreg[reg]; 62.307 + return current->arch.guest_context.debugreg[reg]; 62.308 } 62.309 62.310 /*
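The math_state_restore change above replaces a separate test_bit/set_bit pair with a single test_and_set_bit, collapsing the check and the mark into one atomic read-modify-write. A standalone sketch of the primitive's semantics, with a GCC builtin standing in for the kernel bitops:

    /* Why test_and_set_bit(): the old sequence
     *
     *     if ( !test_bit(...) ) { ...; set_bit(...); }
     *
     * separates the test from the set; the atomic form does both in
     * one step and reports the previous value.  Minimal illustration
     * (the builtin is a stand-in, not Xen's implementation): */
    static int test_and_set_bit_sketch(int nr, volatile unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        /* returns nonzero iff the bit was already set */
        return (__sync_fetch_and_or(addr, mask) & mask) != 0;
    }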
63.1 --- a/xen/arch/x86/vmx.c Thu Apr 28 13:54:01 2005 +0000 63.2 +++ b/xen/arch/x86/vmx.c Fri Apr 29 07:34:47 2005 +0000 63.3 @@ -46,7 +46,7 @@ unsigned int opt_vmx_debug_level = 0; 63.4 63.5 extern long evtchn_send(int lport); 63.6 extern long do_block(void); 63.7 -void do_nmi(struct xen_regs *, unsigned long); 63.8 +void do_nmi(struct cpu_user_regs *, unsigned long); 63.9 63.10 int start_vmx() 63.11 { 63.12 @@ -105,7 +105,7 @@ static void inline __update_guest_eip(un 63.13 63.14 #include <asm/domain_page.h> 63.15 63.16 -static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) 63.17 +static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 63.18 { 63.19 struct exec_domain *ed = current; 63.20 unsigned long eip; 63.21 @@ -154,7 +154,7 @@ static int vmx_do_page_fault(unsigned lo 63.22 return result; 63.23 } 63.24 63.25 -static void vmx_do_general_protection_fault(struct xen_regs *regs) 63.26 +static void vmx_do_general_protection_fault(struct cpu_user_regs *regs) 63.27 { 63.28 unsigned long eip, error_code; 63.29 unsigned long intr_fields; 63.30 @@ -181,7 +181,7 @@ static void vmx_do_general_protection_fa 63.31 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 63.32 } 63.33 63.34 -static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs) 63.35 +static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) 63.36 { 63.37 unsigned int eax, ebx, ecx, edx; 63.38 unsigned long eip; 63.39 @@ -217,7 +217,7 @@ static void vmx_vmexit_do_cpuid(unsigned 63.40 #define CASE_GET_REG_P(REG, reg) \ 63.41 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break 63.42 63.43 -static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs) 63.44 +static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs) 63.45 { 63.46 unsigned int reg; 63.47 unsigned long *reg_p = 0; 63.48 @@ -250,18 +250,18 @@ static void vmx_dr_access (unsigned long 63.49 case TYPE_MOV_TO_DR: 63.50 /* don't need to check the range */ 63.51 if (reg != REG_ESP) 63.52 - ed->arch.debugreg[reg] = *reg_p; 63.53 + ed->arch.guest_context.debugreg[reg] = *reg_p; 63.54 else { 63.55 unsigned long value; 63.56 __vmread(GUEST_ESP, &value); 63.57 - ed->arch.debugreg[reg] = value; 63.58 + ed->arch.guest_context.debugreg[reg] = value; 63.59 } 63.60 break; 63.61 case TYPE_MOV_FROM_DR: 63.62 if (reg != REG_ESP) 63.63 - *reg_p = ed->arch.debugreg[reg]; 63.64 + *reg_p = ed->arch.guest_context.debugreg[reg]; 63.65 else { 63.66 - __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]); 63.67 + __vmwrite(GUEST_ESP, ed->arch.guest_context.debugreg[reg]); 63.68 } 63.69 break; 63.70 } 63.71 @@ -288,7 +288,7 @@ static void vmx_vmexit_do_invlpg(unsigne 63.72 shadow_invlpg(ed, va); 63.73 } 63.74 63.75 -static void vmx_io_instruction(struct xen_regs *regs, 63.76 +static void vmx_io_instruction(struct cpu_user_regs *regs, 63.77 unsigned long exit_qualification, unsigned long inst_len) 63.78 { 63.79 struct exec_domain *d = current; 63.80 @@ -728,7 +728,7 @@ static int vmx_set_cr0(unsigned long val 63.81 /* 63.82 * Write to control registers 63.83 */ 63.84 -static int mov_to_cr(int gp, int cr, struct xen_regs *regs) 63.85 +static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs) 63.86 { 63.87 unsigned long value; 63.88 unsigned long old_cr; 63.89 @@ -847,7 +847,7 @@ static int mov_to_cr(int gp, int cr, str 63.90 /* 63.91 * Read from control registers. CR0 and CR4 are read from the shadow. 
63.92 */ 63.93 -static void mov_from_cr(int cr, int gp, struct xen_regs *regs) 63.94 +static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs) 63.95 { 63.96 unsigned long value; 63.97 struct exec_domain *d = current; 63.98 @@ -878,7 +878,7 @@ static void mov_from_cr(int cr, int gp, 63.99 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value); 63.100 } 63.101 63.102 -static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs) 63.103 +static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs) 63.104 { 63.105 unsigned int gp, cr; 63.106 unsigned long value; 63.107 @@ -916,7 +916,7 @@ static int vmx_cr_access(unsigned long e 63.108 return 1; 63.109 } 63.110 63.111 -static inline void vmx_do_msr_read(struct xen_regs *regs) 63.112 +static inline void vmx_do_msr_read(struct cpu_user_regs *regs) 63.113 { 63.114 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx", 63.115 (unsigned long)regs->ecx, (unsigned long)regs->eax, 63.116 @@ -973,7 +973,7 @@ static void vmx_print_line(const char c, 63.117 print_buf[index++] = c; 63.118 } 63.119 63.120 -void save_vmx_execution_context(execution_context_t *ctxt) 63.121 +void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt) 63.122 { 63.123 __vmread(GUEST_SS_SELECTOR, &ctxt->ss); 63.124 __vmread(GUEST_ESP, &ctxt->esp); 63.125 @@ -988,7 +988,7 @@ static void vmx_print_line(const char c, 63.126 } 63.127 63.128 #ifdef XEN_DEBUGGER 63.129 -void save_xen_regs(struct xen_regs *regs) 63.130 +void save_cpu_user_regs(struct cpu_user_regs *regs) 63.131 { 63.132 __vmread(GUEST_SS_SELECTOR, &regs->xss); 63.133 __vmread(GUEST_ESP, &regs->esp); 63.134 @@ -1002,7 +1002,7 @@ void save_xen_regs(struct xen_regs *regs 63.135 __vmread(GUEST_DS_SELECTOR, &regs->xds); 63.136 } 63.137 63.138 -void restore_xen_regs(struct xen_regs *regs) 63.139 +void restore_cpu_user_regs(struct cpu_user_regs *regs) 63.140 { 63.141 __vmwrite(GUEST_SS_SELECTOR, regs->xss); 63.142 __vmwrite(GUEST_ESP, regs->esp); 63.143 @@ -1017,7 +1017,7 @@ void restore_xen_regs(struct xen_regs *r 63.144 } 63.145 #endif 63.146 63.147 -asmlinkage void vmx_vmexit_handler(struct xen_regs regs) 63.148 +asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs) 63.149 { 63.150 unsigned int exit_reason, idtv_info_field; 63.151 unsigned long exit_qualification, eip, inst_len = 0; 63.152 @@ -1080,16 +1080,16 @@ asmlinkage void vmx_vmexit_handler(struc 63.153 #ifdef XEN_DEBUGGER 63.154 case TRAP_debug: 63.155 { 63.156 - save_xen_regs(&regs); 63.157 + save_cpu_user_regs(&regs); 63.158 pdb_handle_exception(1, &regs, 1); 63.159 - restore_xen_regs(&regs); 63.160 + restore_cpu_user_regs(&regs); 63.161 break; 63.162 } 63.163 case TRAP_int3: 63.164 { 63.165 - save_xen_regs(&regs); 63.166 + save_cpu_user_regs(&regs); 63.167 pdb_handle_exception(3, &regs, 1); 63.168 - restore_xen_regs(&regs); 63.169 + restore_cpu_user_regs(&regs); 63.170 break; 63.171 } 63.172 #endif 63.173 @@ -1139,9 +1139,9 @@ asmlinkage void vmx_vmexit_handler(struc 63.174 case EXIT_REASON_EXTERNAL_INTERRUPT: 63.175 { 63.176 extern int vector_irq[]; 63.177 - extern asmlinkage void do_IRQ(struct xen_regs *); 63.178 - extern void smp_apic_timer_interrupt(struct xen_regs *); 63.179 - extern void timer_interrupt(int, void *, struct xen_regs *); 63.180 + extern asmlinkage void do_IRQ(struct cpu_user_regs *); 63.181 + extern void smp_apic_timer_interrupt(struct cpu_user_regs *); 63.182 + extern void timer_interrupt(int, void *, struct cpu_user_regs *); 63.183 unsigned int vector; 63.184 63.185 if ((error = 
__vmread(VM_EXIT_INTR_INFO, &vector))
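Throughout vmx.c, guest register state lives in the VMCS and is copied to and from the renamed cpu_user_regs frame via __vmread/__vmwrite. A toy model of that save/restore pairing, with the VMCS reduced to a flat array and invented field ids (the real accessors are VMX instructions, not memory loads):

    /* Toy model only: F_* ids and the vmcs[] array are invented. */
    enum { F_GUEST_SS, F_GUEST_ESP, F_GUEST_EIP, F_MAX };
    static unsigned long vmcs[F_MAX];

    struct regs_sketch { unsigned long ss, esp, eip; };

    static void save_regs_sketch(struct regs_sketch *r)      /* VMCS -> frame */
    {
        r->ss  = vmcs[F_GUEST_SS];
        r->esp = vmcs[F_GUEST_ESP];
        r->eip = vmcs[F_GUEST_EIP];
    }

    static void restore_regs_sketch(const struct regs_sketch *r) /* frame -> VMCS */
    {
        vmcs[F_GUEST_SS]  = r->ss;
        vmcs[F_GUEST_ESP] = r->esp;
        vmcs[F_GUEST_EIP] = r->eip;
    }

The save half runs on every VM exit that needs the registers (e.g. the debugger hooks above); the restore half pushes any modifications back before resuming the guest.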
64.1 --- a/xen/arch/x86/vmx_intercept.c Thu Apr 28 13:54:01 2005 +0000 64.2 +++ b/xen/arch/x86/vmx_intercept.c Fri Apr 29 07:34:47 2005 +0000 64.3 @@ -140,19 +140,19 @@ static int pit_read_io(struct vmx_virpit 64.4 /* vmx_io_assist light-weight version, specific to PIT DM */ 64.5 static void resume_pit_io(ioreq_t *p) 64.6 { 64.7 - execution_context_t *ec = get_execution_context(); 64.8 - unsigned long old_eax = ec->eax; 64.9 + struct cpu_user_regs *regs = get_cpu_user_regs(); 64.10 + unsigned long old_eax = regs->eax; 64.11 p->state = STATE_INVALID; 64.12 64.13 switch(p->size) { 64.14 case 1: 64.15 - ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); 64.16 + regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); 64.17 break; 64.18 case 2: 64.19 - ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); 64.20 + regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); 64.21 break; 64.22 case 4: 64.23 - ec->eax = (p->u.data & 0xffffffff); 64.24 + regs->eax = (p->u.data & 0xffffffff); 64.25 break; 64.26 default: 64.27 BUG();
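resume_pit_io folds the device model's reply into EAX with partial-register semantics: a 1-byte IN replaces only AL, a 2-byte IN replaces AX, and a 4-byte IN replaces the whole register. The same masking appears again in vmx_io_assist below. A standalone version of that merge:

    /* Exactly the arithmetic used above, lifted into a helper. */
    static unsigned long merge_in_result(unsigned long old_eax,
                                         unsigned long data, int size)
    {
        switch ( size )
        {
        case 1:  return (old_eax & 0xffffff00UL) | (data & 0xffUL);
        case 2:  return (old_eax & 0xffff0000UL) | (data & 0xffffUL);
        case 4:  return data & 0xffffffffUL;
        default: return old_eax;   /* the real code BUG()s here */
        }
    }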
65.1 --- a/xen/arch/x86/vmx_io.c Thu Apr 28 13:54:01 2005 +0000 65.2 +++ b/xen/arch/x86/vmx_io.c Fri Apr 29 07:34:47 2005 +0000 65.3 @@ -38,7 +38,7 @@ 65.4 extern long do_block(); 65.5 65.6 #if defined (__i386__) 65.7 -static void load_xen_regs(struct xen_regs *regs) 65.8 +static void load_cpu_user_regs(struct cpu_user_regs *regs) 65.9 { 65.10 /* 65.11 * Write the guest register value into VMCS 65.12 @@ -50,7 +50,7 @@ static void load_xen_regs(struct xen_reg 65.13 __vmwrite(GUEST_EIP, regs->eip); 65.14 } 65.15 65.16 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value) 65.17 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value) 65.18 { 65.19 switch (size) { 65.20 case BYTE: 65.21 @@ -170,12 +170,12 @@ static void set_reg_value (int size, int 65.22 } 65.23 } 65.24 #else 65.25 -static void load_xen_regs(struct xen_regs *regs) 65.26 +static void load_cpu_user_regs(struct cpu_user_regs *regs) 65.27 { 65.28 /* XXX: TBD */ 65.29 return; 65.30 } 65.31 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value) 65.32 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value) 65.33 { 65.34 /* XXX: TBD */ 65.35 return; 65.36 @@ -187,11 +187,11 @@ void vmx_io_assist(struct exec_domain *e 65.37 vcpu_iodata_t *vio; 65.38 ioreq_t *p; 65.39 struct domain *d = ed->domain; 65.40 - execution_context_t *ec = get_execution_context(); 65.41 + struct cpu_user_regs *regs = get_cpu_user_regs(); 65.42 unsigned long old_eax; 65.43 int sign; 65.44 struct mi_per_cpu_info *mpci_p; 65.45 - struct xen_regs *inst_decoder_regs; 65.46 + struct cpu_user_regs *inst_decoder_regs; 65.47 65.48 mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci; 65.49 inst_decoder_regs = mpci_p->inst_decoder_regs; 65.50 @@ -230,8 +230,8 @@ void vmx_io_assist(struct exec_domain *e 65.51 sign = (p->df) ? 
-1 : 1; 65.52 if (p->port_mm) { 65.53 if (p->pdata_valid) { 65.54 - ec->esi += sign * p->count * p->size; 65.55 - ec->edi += sign * p->count * p->size; 65.56 + regs->esi += sign * p->count * p->size; 65.57 + regs->edi += sign * p->count * p->size; 65.58 } else { 65.59 if (p->dir == IOREQ_WRITE) { 65.60 return; 65.61 @@ -244,38 +244,38 @@ void vmx_io_assist(struct exec_domain *e 65.62 if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) { 65.63 p->u.data = p->u.data & 0xffff; 65.64 } 65.65 - set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data); 65.66 + set_reg_value(size, index, 0, regs, p->u.data); 65.67 65.68 } 65.69 - load_xen_regs((struct xen_regs *)ec); 65.70 + load_cpu_user_regs(regs); 65.71 return; 65.72 } 65.73 65.74 if (p->dir == IOREQ_WRITE) { 65.75 if (p->pdata_valid) { 65.76 - ec->esi += sign * p->count * p->size; 65.77 - ec->ecx -= p->count; 65.78 + regs->esi += sign * p->count * p->size; 65.79 + regs->ecx -= p->count; 65.80 } 65.81 return; 65.82 } else { 65.83 if (p->pdata_valid) { 65.84 - ec->edi += sign * p->count * p->size; 65.85 - ec->ecx -= p->count; 65.86 + regs->edi += sign * p->count * p->size; 65.87 + regs->ecx -= p->count; 65.88 return; 65.89 } 65.90 } 65.91 65.92 - old_eax = ec->eax; 65.93 + old_eax = regs->eax; 65.94 65.95 switch(p->size) { 65.96 case 1: 65.97 - ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); 65.98 + regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); 65.99 break; 65.100 case 2: 65.101 - ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); 65.102 + regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); 65.103 break; 65.104 case 4: 65.105 - ec->eax = (p->u.data & 0xffffffff); 65.106 + regs->eax = (p->u.data & 0xffffffff); 65.107 break; 65.108 default: 65.109 BUG();
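Besides the EAX merge, vmx_io_assist fixes up the string-I/O registers after a REP INS/OUTS request completes: ESI or EDI advances by count*size in the direction chosen by the direction flag, and ECX drops by the completed REP count. A simplified sketch of the port-I/O case (the memory-mapped path above adjusts both index registers; struct str_io is invented for this illustration):

    struct str_io { unsigned long esi, edi, ecx; };

    static void advance_string_io(struct str_io *r, int df,
                                  unsigned long count, unsigned long size,
                                  int is_write)
    {
        long sign = df ? -1 : 1;        /* DF set: addresses move down */
        if ( is_write )
            r->esi += sign * (long)(count * size);  /* OUTS reads DS:ESI */
        else
            r->edi += sign * (long)(count * size);  /* INS writes ES:EDI */
        r->ecx -= count;                            /* REP iterations done */
    }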
66.1 --- a/xen/arch/x86/vmx_platform.c Thu Apr 28 13:54:01 2005 +0000 66.2 +++ b/xen/arch/x86/vmx_platform.c Fri Apr 29 07:34:47 2005 +0000 66.3 @@ -39,17 +39,17 @@ 66.4 #define DECODE_failure 0 66.5 66.6 #if defined (__x86_64__) 66.7 -static void store_xen_regs(struct xen_regs *regs) 66.8 +static void store_cpu_user_regs(struct cpu_user_regs *regs) 66.9 { 66.10 66.11 } 66.12 66.13 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) 66.14 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 66.15 { 66.16 return 0; 66.17 } 66.18 #elif defined (__i386__) 66.19 -static void store_xen_regs(struct xen_regs *regs) 66.20 +static void store_cpu_user_regs(struct cpu_user_regs *regs) 66.21 { 66.22 __vmread(GUEST_SS_SELECTOR, &regs->ss); 66.23 __vmread(GUEST_ESP, &regs->esp); 66.24 @@ -60,7 +60,7 @@ static void store_xen_re 66.25 __vmread(GUEST_EIP, &regs->eip); 66.26 } 66.27 66.28 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) 66.29 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 66.30 { 66.31 /* 66.32 * Reference the db_reg[] table 66.33 @@ -468,7 +468,7 @@ static void send_mmio_req(unsigned long 66.34 ioreq_t *p; 66.35 int vm86; 66.36 struct mi_per_cpu_info *mpci_p; 66.37 - struct xen_regs *inst_decoder_regs; 66.38 + struct cpu_user_regs *inst_decoder_regs; 66.39 extern long evtchn_send(int lport); 66.40 extern long do_block(void); 66.41 66.42 @@ -528,7 +528,7 @@ void handle_mmio(unsigned long va, unsig 66.43 unsigned long eip, eflags, cs; 66.44 unsigned long inst_len, inst_addr; 66.45 struct mi_per_cpu_info *mpci_p; 66.46 - struct xen_regs *inst_decoder_regs; 66.47 + struct cpu_user_regs *inst_decoder_regs; 66.48 struct instruction mmio_inst; 66.49 unsigned char inst[MAX_INST_LEN]; 66.50 int vm86, ret; 66.51 @@ -569,7 +569,7 @@ void handle_mmio(unsigned long va, unsig 66.52 domain_crash_synchronous(); 66.53 66.54 __vmwrite(GUEST_EIP, eip + inst_len); 66.55 - store_xen_regs(inst_decoder_regs); 66.56 + store_cpu_user_regs(inst_decoder_regs); 66.57 66.58 // Only handle "mov" and "movs" instructions! 66.59 if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
67.1 --- a/xen/arch/x86/vmx_vmcs.c Thu Apr 28 13:54:01 2005 +0000 67.2 +++ b/xen/arch/x86/vmx_vmcs.c Fri Apr 29 07:34:47 2005 +0000 67.3 @@ -100,7 +100,7 @@ struct host_execution_env { 67.4 67.5 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */ 67.6 67.7 -int vmx_setup_platform(struct exec_domain *d, execution_context_t *context) 67.8 +int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs) 67.9 { 67.10 int i; 67.11 unsigned int n; 67.12 @@ -108,15 +108,15 @@ int vmx_setup_platform(struct exec_domai 67.13 struct e820entry *e820p; 67.14 unsigned long gpfn = 0; 67.15 67.16 - context->ebx = 0; /* Linux expects ebx to be 0 for boot proc */ 67.17 + regs->ebx = 0; /* Linux expects ebx to be 0 for boot proc */ 67.18 67.19 - n = context->ecx; 67.20 + n = regs->ecx; 67.21 if (n > 32) { 67.22 VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n); 67.23 return -1; 67.24 } 67.25 67.26 - addr = context->edi; 67.27 + addr = regs->edi; 67.28 offset = (addr & ~PAGE_MASK); 67.29 addr = round_pgdown(addr); 67.30 mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT); 67.31 @@ -162,14 +162,14 @@ void vmx_do_launch(struct exec_domain *e 67.32 struct Xgt_desc_struct desc; 67.33 unsigned long pfn = 0; 67.34 struct pfn_info *page; 67.35 - execution_context_t *ec = get_execution_context(); 67.36 + struct cpu_user_regs *regs = get_cpu_user_regs(); 67.37 67.38 cpu = smp_processor_id(); 67.39 67.40 page = (struct pfn_info *) alloc_domheap_page(NULL); 67.41 pfn = (unsigned long) (page - frame_table); 67.42 67.43 - vmx_setup_platform(ed, ec); 67.44 + vmx_setup_platform(ed, regs); 67.45 67.46 __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory"); 67.47 host_env.gdtr_limit = desc.size; 67.48 @@ -202,8 +202,8 @@ void vmx_do_launch(struct exec_domain *e 67.49 * Initially set the same environement as host. 
67.50 */ 67.51 static inline int 67.52 -construct_init_vmcs_guest(execution_context_t *context, 67.53 - full_execution_context_t *full_context, 67.54 +construct_init_vmcs_guest(struct cpu_user_regs *regs, 67.55 + struct vcpu_guest_context *ctxt, 67.56 struct host_execution_env *host_env) 67.57 { 67.58 int error = 0; 67.59 @@ -232,12 +232,12 @@ construct_init_vmcs_guest(execution_cont 67.60 error |= __vmwrite(CR3_TARGET_COUNT, 0); 67.61 67.62 /* Guest Selectors */ 67.63 - error |= __vmwrite(GUEST_CS_SELECTOR, context->cs); 67.64 - error |= __vmwrite(GUEST_ES_SELECTOR, context->es); 67.65 - error |= __vmwrite(GUEST_SS_SELECTOR, context->ss); 67.66 - error |= __vmwrite(GUEST_DS_SELECTOR, context->ds); 67.67 - error |= __vmwrite(GUEST_FS_SELECTOR, context->fs); 67.68 - error |= __vmwrite(GUEST_GS_SELECTOR, context->gs); 67.69 + error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs); 67.70 + error |= __vmwrite(GUEST_ES_SELECTOR, regs->es); 67.71 + error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss); 67.72 + error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds); 67.73 + error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs); 67.74 + error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs); 67.75 67.76 /* Guest segment Limits */ 67.77 error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT); 67.78 @@ -268,10 +268,10 @@ construct_init_vmcs_guest(execution_cont 67.79 arbytes.fields.seg_type = 0xb; /* type = 0xb */ 67.80 error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes); 67.81 67.82 - error |= __vmwrite(GUEST_GDTR_BASE, context->edx); 67.83 - context->edx = 0; 67.84 - error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax); 67.85 - context->eax = 0; 67.86 + error |= __vmwrite(GUEST_GDTR_BASE, regs->edx); 67.87 + regs->edx = 0; 67.88 + error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax); 67.89 + regs->eax = 0; 67.90 67.91 arbytes.fields.s = 0; /* not code or data segement */ 67.92 arbytes.fields.seg_type = 0x2; /* LTD */ 67.93 @@ -302,10 +302,10 @@ construct_init_vmcs_guest(execution_cont 67.94 error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base); 67.95 error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base); 67.96 67.97 - error |= __vmwrite(GUEST_ESP, context->esp); 67.98 - error |= __vmwrite(GUEST_EIP, context->eip); 67.99 + error |= __vmwrite(GUEST_ESP, regs->esp); 67.100 + error |= __vmwrite(GUEST_EIP, regs->eip); 67.101 67.102 - eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */ 67.103 + eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */ 67.104 eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */ 67.105 67.106 error |= __vmwrite(GUEST_EFLAGS, eflags); 67.107 @@ -380,8 +380,8 @@ static inline int construct_vmcs_host(st 67.108 */ 67.109 67.110 int construct_vmcs(struct arch_vmx_struct *arch_vmx, 67.111 - execution_context_t *context, 67.112 - full_execution_context_t *full_context, 67.113 + struct cpu_user_regs *regs, 67.114 + struct vcpu_guest_context *ctxt, 67.115 int use_host_env) 67.116 { 67.117 int error; 67.118 @@ -415,7 +415,7 @@ int construct_vmcs(struct arch_vmx_struc 67.119 return -EINVAL; 67.120 } 67.121 /* guest selectors */ 67.122 - if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) { 67.123 + if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) { 67.124 printk("construct_vmcs: construct_vmcs_guest failed\n"); 67.125 return -EINVAL; 67.126 }
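construct_init_vmcs_guest sanitises the guest's EFLAGS before writing it into the VMCS: reserved-must-be-zero bits are cleared and reserved-must-be-one bits (bit 1 on x86) are forced on. A standalone sketch; the mask values below are assumptions for illustration, not the real VMCS_EFLAGS_* constants:

    #define EFLAGS_RESERVED_1  0x00000002UL   /* bit 1 must read as 1 */
    #define EFLAGS_RESERVED_0  0xffc08028UL   /* assumed must-be-zero mask */

    static unsigned long sanitize_eflags(unsigned long eflags)
    {
        eflags &= ~EFLAGS_RESERVED_0;   /* clear must-be-zero bits */
        eflags |=  EFLAGS_RESERVED_1;   /* set must-be-one bits */
        return eflags;
    }

Without this, a stray reserved bit handed in by the builder would make the subsequent VM entry fail its guest-state checks.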
68.1 --- a/xen/arch/x86/x86_32/asm-offsets.c Thu Apr 28 13:54:01 2005 +0000 68.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c Fri Apr 29 07:34:47 2005 +0000 68.3 @@ -24,38 +24,44 @@ 68.4 68.5 void __dummy__(void) 68.6 { 68.7 - OFFSET(XREGS_eax, struct xen_regs, eax); 68.8 - OFFSET(XREGS_ebx, struct xen_regs, ebx); 68.9 - OFFSET(XREGS_ecx, struct xen_regs, ecx); 68.10 - OFFSET(XREGS_edx, struct xen_regs, edx); 68.11 - OFFSET(XREGS_esi, struct xen_regs, esi); 68.12 - OFFSET(XREGS_edi, struct xen_regs, edi); 68.13 - OFFSET(XREGS_esp, struct xen_regs, esp); 68.14 - OFFSET(XREGS_ebp, struct xen_regs, ebp); 68.15 - OFFSET(XREGS_eip, struct xen_regs, eip); 68.16 - OFFSET(XREGS_cs, struct xen_regs, cs); 68.17 - OFFSET(XREGS_ds, struct xen_regs, ds); 68.18 - OFFSET(XREGS_es, struct xen_regs, es); 68.19 - OFFSET(XREGS_fs, struct xen_regs, fs); 68.20 - OFFSET(XREGS_gs, struct xen_regs, gs); 68.21 - OFFSET(XREGS_ss, struct xen_regs, ss); 68.22 - OFFSET(XREGS_eflags, struct xen_regs, eflags); 68.23 - OFFSET(XREGS_error_code, struct xen_regs, error_code); 68.24 - OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector); 68.25 - OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp); 68.26 - DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs)); 68.27 + OFFSET(UREGS_eax, struct cpu_user_regs, eax); 68.28 + OFFSET(UREGS_ebx, struct cpu_user_regs, ebx); 68.29 + OFFSET(UREGS_ecx, struct cpu_user_regs, ecx); 68.30 + OFFSET(UREGS_edx, struct cpu_user_regs, edx); 68.31 + OFFSET(UREGS_esi, struct cpu_user_regs, esi); 68.32 + OFFSET(UREGS_edi, struct cpu_user_regs, edi); 68.33 + OFFSET(UREGS_esp, struct cpu_user_regs, esp); 68.34 + OFFSET(UREGS_ebp, struct cpu_user_regs, ebp); 68.35 + OFFSET(UREGS_eip, struct cpu_user_regs, eip); 68.36 + OFFSET(UREGS_cs, struct cpu_user_regs, cs); 68.37 + OFFSET(UREGS_ds, struct cpu_user_regs, ds); 68.38 + OFFSET(UREGS_es, struct cpu_user_regs, es); 68.39 + OFFSET(UREGS_fs, struct cpu_user_regs, fs); 68.40 + OFFSET(UREGS_gs, struct cpu_user_regs, gs); 68.41 + OFFSET(UREGS_ss, struct cpu_user_regs, ss); 68.42 + OFFSET(UREGS_eflags, struct cpu_user_regs, eflags); 68.43 + OFFSET(UREGS_error_code, struct cpu_user_regs, error_code); 68.44 + OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector); 68.45 + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp); 68.46 + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); 68.47 BLANK(); 68.48 68.49 OFFSET(EDOMAIN_processor, struct exec_domain, processor); 68.50 OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info); 68.51 - OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector); 68.52 - OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address); 68.53 - OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector); 68.54 - OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address); 68.55 OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce); 68.56 OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags); 68.57 - OFFSET(EDOMAIN_kernel_ss, struct exec_domain, arch.kernel_ss); 68.58 - OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp); 68.59 + OFFSET(EDOMAIN_event_sel, struct exec_domain, 68.60 + arch.guest_context.event_callback_cs); 68.61 + OFFSET(EDOMAIN_event_addr, struct exec_domain, 68.62 + arch.guest_context.event_callback_eip); 68.63 + OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, 68.64 + arch.guest_context.failsafe_callback_cs); 68.65 + OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, 68.66 + arch.guest_context.failsafe_callback_eip); 68.67 + 
OFFSET(EDOMAIN_kernel_ss, struct exec_domain, 68.68 + arch.guest_context.kernel_ss); 68.69 + OFFSET(EDOMAIN_kernel_sp, struct exec_domain, 68.70 + arch.guest_context.kernel_sp); 68.71 BLANK(); 68.72 68.73 OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
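asm-offsets.c exists so that entry.S can name structure offsets like UREGS_eax symbolically: each OFFSET() invocation makes the compiler emit the numeric offsetof() value into its assembly output, from which the build scrapes an asm-offsets.h header. The macro shapes below follow the usual Linux-derived trick and are assumed for illustration, not copied from the Xen tree:

    #include <stddef.h>

    /* Each DEFINE() plants a "->SYMBOL value" marker in the generated
     * .s file; a build script greps these into #define lines. */
    #define DEFINE(sym, val) \
        __asm__ __volatile__ ( "\n->" #sym " %0 " : : "i" (val) )
    #define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(str, mem))
    #define BLANK()  __asm__ __volatile__ ( "\n->" : : )

    /* So OFFSET(UREGS_eax, struct cpu_user_regs, eax) ultimately lets
     * entry.S write  movl %eax, UREGS_eax(%esp)  with the right offset. */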
69.1 --- a/xen/arch/x86/x86_32/call_with_regs.S Thu Apr 28 13:54:01 2005 +0000 69.2 +++ b/xen/arch/x86/x86_32/call_with_regs.S Fri Apr 29 07:34:47 2005 +0000 69.3 @@ -2,35 +2,35 @@ 69.4 69.5 #include <asm/asm-offsets.h> 69.6 69.7 - // int call_with_registers(void (*f)(struct xen_regs *r)) -> 69.8 - // build a xen_regs structure, and then call f with that. 69.9 + // int call_with_registers(void (*f)(struct cpu_user_regs *r)) -> 69.10 + // build a cpu_user_regs structure, and then call f with that. 69.11 call_with_registers: 69.12 pushf 69.13 - subl $XREGS_user_sizeof, %esp 69.14 - movl %ebx, XREGS_ebx(%esp) 69.15 - movl %ecx, XREGS_ecx(%esp) 69.16 - movl %edx, XREGS_edx(%esp) 69.17 - movl %esi, XREGS_esi(%esp) 69.18 - movl %edi, XREGS_edi(%esp) 69.19 - movl %ebp, XREGS_ebp(%esp) 69.20 - movl %eax, XREGS_eax(%esp) 69.21 - movw $0, XREGS_error_code(%esp) 69.22 - movw $0, XREGS_entry_vector(%esp) 69.23 - movl XREGS_user_sizeof+4(%esp), %eax 69.24 - movl %eax, XREGS_eip(%esp) 69.25 - movl %cs, XREGS_cs(%esp) 69.26 - movl XREGS_user_sizeof(%esp), %eax 69.27 - movl %eax, XREGS_eflags(%esp) 69.28 - movl %esp, XREGS_esp(%esp) 69.29 - addl $XREGS_user_sizeof+4, XREGS_esp(%esp) 69.30 - movl %ss, XREGS_ss(%esp) 69.31 - movl %es, XREGS_es(%esp) 69.32 - movl %ds, XREGS_ds(%esp) 69.33 - movl %fs, XREGS_fs(%esp) 69.34 - movl %gs, XREGS_gs(%esp) 69.35 + subl $UREGS_user_sizeof, %esp 69.36 + movl %ebx, UREGS_ebx(%esp) 69.37 + movl %ecx, UREGS_ecx(%esp) 69.38 + movl %edx, UREGS_edx(%esp) 69.39 + movl %esi, UREGS_esi(%esp) 69.40 + movl %edi, UREGS_edi(%esp) 69.41 + movl %ebp, UREGS_ebp(%esp) 69.42 + movl %eax, UREGS_eax(%esp) 69.43 + movw $0, UREGS_error_code(%esp) 69.44 + movw $0, UREGS_entry_vector(%esp) 69.45 + movl UREGS_user_sizeof+4(%esp), %eax 69.46 + movl %eax, UREGS_eip(%esp) 69.47 + movl %cs, UREGS_cs(%esp) 69.48 + movl UREGS_user_sizeof(%esp), %eax 69.49 + movl %eax, UREGS_eflags(%esp) 69.50 + movl %esp, UREGS_esp(%esp) 69.51 + addl $UREGS_user_sizeof+4, UREGS_esp(%esp) 69.52 + movl %ss, UREGS_ss(%esp) 69.53 + movl %es, UREGS_es(%esp) 69.54 + movl %ds, UREGS_ds(%esp) 69.55 + movl %fs, UREGS_fs(%esp) 69.56 + movl %gs, UREGS_gs(%esp) 69.57 69.58 - movl XREGS_user_sizeof+8(%esp), %eax 69.59 + movl UREGS_user_sizeof+8(%esp), %eax 69.60 pushl %esp 69.61 call *%eax 69.62 - add $XREGS_user_sizeof + 8, %esp 69.63 + add $UREGS_user_sizeof + 8, %esp 69.64 ret
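call_with_registers builds a complete cpu_user_regs frame describing its own call site (general registers, segment selectors, a synthesised EIP and ESP, zeroed error_code and entry_vector) and hands it to the callback, so register-frame-based helpers can run outside a real exception. A hypothetical C-side caller, with dump_state and example invented for illustration:

    struct cpu_user_regs;                      /* provided by Xen headers */
    extern int call_with_registers(void (*f)(struct cpu_user_regs *r));

    static void dump_state(struct cpu_user_regs *r)
    {
        /* inspect or print *r here; it reflects the call site */
        (void)r;
    }

    static void example(void)
    {
        (void)call_with_registers(dump_state);
    }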
70.1 --- a/xen/arch/x86/x86_32/entry.S Thu Apr 28 13:54:01 2005 +0000 70.2 +++ b/xen/arch/x86/x86_32/entry.S Fri Apr 29 07:34:47 2005 +0000 70.3 @@ -76,7 +76,7 @@ 70.4 * and we set it to the fixed value. 70.5 * 70.6 * We also need the room, especially because orig_eax field is used 70.7 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following: 70.8 + * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following: 70.9 * (10) u32 gs; 70.10 * (9) u32 fs; 70.11 * (8) u32 ds; 70.12 @@ -99,7 +99,7 @@ 70.13 pushl $VMX_MONITOR_EFLAGS; \ 70.14 popf; \ 70.15 subl $(NR_SKIPPED_REGS*4), %esp; \ 70.16 - movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \ 70.17 + movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \ 70.18 pushl %eax; \ 70.19 pushl %ebp; \ 70.20 pushl %edi; \ 70.21 @@ -111,7 +111,7 @@ 70.22 ENTRY(vmx_asm_vmexit_handler) 70.23 /* selectors are restored/saved by VMX */ 70.24 VMX_SAVE_ALL_NOSEGREGS 70.25 - call SYMBOL_NAME(vmx_vmexit_handler) 70.26 + call vmx_vmexit_handler 70.27 jmp vmx_asm_do_resume 70.28 70.29 ENTRY(vmx_asm_do_launch) 70.30 @@ -126,7 +126,7 @@ ENTRY(vmx_asm_do_launch) 70.31 /* VMLUANCH */ 70.32 .byte 0x0f,0x01,0xc2 70.33 pushf 70.34 - call SYMBOL_NAME(vm_launch_fail) 70.35 + call vm_launch_fail 70.36 hlt 70.37 70.38 ALIGN 70.39 @@ -141,11 +141,11 @@ vmx_test_all_events: 70.40 /*test_softirqs:*/ 70.41 movl EDOMAIN_processor(%ebx),%eax 70.42 shl $IRQSTAT_shift,%eax 70.43 - test %ecx,SYMBOL_NAME(irq_stat)(%eax,1) 70.44 + test %ecx,irq_stat(%eax,1) 70.45 jnz vmx_process_softirqs 70.46 70.47 vmx_restore_all_guest: 70.48 - call SYMBOL_NAME(load_cr2) 70.49 + call load_cr2 70.50 /* 70.51 * Check if we are going back to VMX-based VM 70.52 * By this time, all the setups in the VMCS must be complete. 
70.53 @@ -161,25 +161,25 @@ vmx_restore_all_guest: 70.54 /* VMRESUME */ 70.55 .byte 0x0f,0x01,0xc3 70.56 pushf 70.57 - call SYMBOL_NAME(vm_resume_fail) 70.58 + call vm_resume_fail 70.59 /* Should never reach here */ 70.60 hlt 70.61 70.62 ALIGN 70.63 vmx_process_softirqs: 70.64 sti 70.65 - call SYMBOL_NAME(do_softirq) 70.66 + call do_softirq 70.67 jmp vmx_test_all_events 70.68 #endif 70.69 70.70 ALIGN 70.71 restore_all_guest: 70.72 - testl $X86_EFLAGS_VM,XREGS_eflags(%esp) 70.73 + testl $X86_EFLAGS_VM,UREGS_eflags(%esp) 70.74 jnz restore_all_vm86 70.75 -FLT1: movl XREGS_ds(%esp),%ds 70.76 -FLT2: movl XREGS_es(%esp),%es 70.77 -FLT3: movl XREGS_fs(%esp),%fs 70.78 -FLT4: movl XREGS_gs(%esp),%gs 70.79 +FLT1: movl UREGS_ds(%esp),%ds 70.80 +FLT2: movl UREGS_es(%esp),%es 70.81 +FLT3: movl UREGS_fs(%esp),%fs 70.82 +FLT4: movl UREGS_gs(%esp),%gs 70.83 restore_all_vm86: 70.84 popl %ebx 70.85 popl %ecx 70.86 @@ -193,13 +193,13 @@ FLT5: iret 70.87 .section .fixup,"ax" 70.88 FIX5: subl $28,%esp 70.89 pushl 28(%esp) # error_code/entry_vector 70.90 - movl %eax,XREGS_eax+4(%esp) 70.91 - movl %ebp,XREGS_ebp+4(%esp) 70.92 - movl %edi,XREGS_edi+4(%esp) 70.93 - movl %esi,XREGS_esi+4(%esp) 70.94 - movl %edx,XREGS_edx+4(%esp) 70.95 - movl %ecx,XREGS_ecx+4(%esp) 70.96 - movl %ebx,XREGS_ebx+4(%esp) 70.97 + movl %eax,UREGS_eax+4(%esp) 70.98 + movl %ebp,UREGS_ebp+4(%esp) 70.99 + movl %edi,UREGS_edi+4(%esp) 70.100 + movl %esi,UREGS_esi+4(%esp) 70.101 + movl %edx,UREGS_edx+4(%esp) 70.102 + movl %ecx,UREGS_ecx+4(%esp) 70.103 + movl %ebx,UREGS_ebx+4(%esp) 70.104 FIX1: SET_XEN_SEGMENTS(a) 70.105 movl %eax,%fs 70.106 movl %eax,%gs 70.107 @@ -224,10 +224,10 @@ failsafe_callback: 70.108 movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx) 70.109 call create_bounce_frame 70.110 xorl %eax,%eax 70.111 - movl %eax,XREGS_ds(%esp) 70.112 - movl %eax,XREGS_es(%esp) 70.113 - movl %eax,XREGS_fs(%esp) 70.114 - movl %eax,XREGS_gs(%esp) 70.115 + movl %eax,UREGS_ds(%esp) 70.116 + movl %eax,UREGS_es(%esp) 70.117 + movl %eax,UREGS_fs(%esp) 70.118 + movl %eax,UREGS_gs(%esp) 70.119 jmp test_all_events 70.120 .previous 70.121 .section __pre_ex_table,"a" 70.122 @@ -261,8 +261,8 @@ ENTRY(hypercall) 70.123 GET_CURRENT(%ebx) 70.124 andl $(NR_hypercalls-1),%eax 70.125 PERFC_INCR(PERFC_hypercalls, %eax) 70.126 - call *SYMBOL_NAME(hypercall_table)(,%eax,4) 70.127 - movl %eax,XREGS_eax(%esp) # save the return value 70.128 + call *hypercall_table(,%eax,4) 70.129 + movl %eax,UREGS_eax(%esp) # save the return value 70.130 70.131 test_all_events: 70.132 xorl %ecx,%ecx 70.133 @@ -271,7 +271,7 @@ test_all_events: 70.134 /*test_softirqs:*/ 70.135 movl EDOMAIN_processor(%ebx),%eax 70.136 shl $IRQSTAT_shift,%eax 70.137 - test %ecx,SYMBOL_NAME(irq_stat)(%eax,1) 70.138 + test %ecx,irq_stat(%eax,1) 70.139 jnz process_softirqs 70.140 /*test_guest_events:*/ 70.141 movl EDOMAIN_vcpu_info(%ebx),%eax 70.142 @@ -295,47 +295,47 @@ test_all_events: 70.143 ALIGN 70.144 process_softirqs: 70.145 sti 70.146 - call SYMBOL_NAME(do_softirq) 70.147 + call do_softirq 70.148 jmp test_all_events 70.149 70.150 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */ 70.151 /* {EIP, CS, EFLAGS, [ESP, SS]} */ 70.152 /* %edx == trap_bounce, %ebx == struct exec_domain */ 70.153 -/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */ 70.154 +/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. 
*/ 70.155 create_bounce_frame: 70.156 - movl XREGS_eflags+4(%esp),%ecx 70.157 - movb XREGS_cs+4(%esp),%cl 70.158 + movl UREGS_eflags+4(%esp),%ecx 70.159 + movb UREGS_cs+4(%esp),%cl 70.160 testl $(2|X86_EFLAGS_VM),%ecx 70.161 jz ring1 /* jump if returning to an existing ring-1 activation */ 70.162 movl EDOMAIN_kernel_sp(%ebx),%esi 70.163 FLT6: movl EDOMAIN_kernel_ss(%ebx),%gs 70.164 - testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) 70.165 + testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) 70.166 jz nvm86_1 70.167 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */ 70.168 - movl XREGS_es+4(%esp),%eax 70.169 + movl UREGS_es+4(%esp),%eax 70.170 FLT7: movl %eax,%gs:(%esi) 70.171 - movl XREGS_ds+4(%esp),%eax 70.172 + movl UREGS_ds+4(%esp),%eax 70.173 FLT8: movl %eax,%gs:4(%esi) 70.174 - movl XREGS_fs+4(%esp),%eax 70.175 + movl UREGS_fs+4(%esp),%eax 70.176 FLT9: movl %eax,%gs:8(%esi) 70.177 - movl XREGS_gs+4(%esp),%eax 70.178 + movl UREGS_gs+4(%esp),%eax 70.179 FLT10: movl %eax,%gs:12(%esi) 70.180 nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */ 70.181 - movl XREGS_esp+4(%esp),%eax 70.182 + movl UREGS_esp+4(%esp),%eax 70.183 FLT11: movl %eax,%gs:(%esi) 70.184 - movl XREGS_ss+4(%esp),%eax 70.185 + movl UREGS_ss+4(%esp),%eax 70.186 FLT12: movl %eax,%gs:4(%esi) 70.187 jmp 1f 70.188 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */ 70.189 - movl XREGS_esp+4(%esp),%esi 70.190 -FLT13: movl XREGS_ss+4(%esp),%gs 70.191 + movl UREGS_esp+4(%esp),%esi 70.192 +FLT13: movl UREGS_ss+4(%esp),%gs 70.193 1: /* Construct a stack frame: EFLAGS, CS/EIP */ 70.194 subl $12,%esi 70.195 - movl XREGS_eip+4(%esp),%eax 70.196 + movl UREGS_eip+4(%esp),%eax 70.197 FLT14: movl %eax,%gs:(%esi) 70.198 - movl XREGS_cs+4(%esp),%eax 70.199 + movl UREGS_cs+4(%esp),%eax 70.200 FLT15: movl %eax,%gs:4(%esi) 70.201 - movl XREGS_eflags+4(%esp),%eax 70.202 + movl UREGS_eflags+4(%esp),%eax 70.203 FLT16: movl %eax,%gs:8(%esi) 70.204 movb TRAPBOUNCE_flags(%edx),%cl 70.205 test $TBF_EXCEPTION_ERRCODE,%cl 70.206 @@ -351,7 +351,7 @@ FLT18: movl %eax,%gs:(%esi) 70.207 1: testb $TBF_FAILSAFE,%cl 70.208 jz 2f 70.209 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame 70.210 - testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) 70.211 + testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) 70.212 jz nvm86_2 70.213 xorl %eax,%eax # VM86: we write zero selector values 70.214 FLT19: movl %eax,%gs:(%esi) 70.215 @@ -359,30 +359,30 @@ FLT20: movl %eax,%gs:4(%esi) 70.216 FLT21: movl %eax,%gs:8(%esi) 70.217 FLT22: movl %eax,%gs:12(%esi) 70.218 jmp 2f 70.219 -nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values 70.220 +nvm86_2:movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values 70.221 FLT23: movl %eax,%gs:(%esi) 70.222 - movl XREGS_es+4(%esp),%eax 70.223 + movl UREGS_es+4(%esp),%eax 70.224 FLT24: movl %eax,%gs:4(%esi) 70.225 - movl XREGS_fs+4(%esp),%eax 70.226 + movl UREGS_fs+4(%esp),%eax 70.227 FLT25: movl %eax,%gs:8(%esi) 70.228 - movl XREGS_gs+4(%esp),%eax 70.229 + movl UREGS_gs+4(%esp),%eax 70.230 FLT26: movl %eax,%gs:12(%esi) 70.231 -2: testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) 70.232 +2: testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) 70.233 jz nvm86_3 70.234 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */ 70.235 - movl %eax,XREGS_ds+4(%esp) 70.236 - movl %eax,XREGS_es+4(%esp) 70.237 - movl %eax,XREGS_fs+4(%esp) 70.238 - movl %eax,XREGS_gs+4(%esp) 70.239 + movl %eax,UREGS_ds+4(%esp) 70.240 + movl %eax,UREGS_es+4(%esp) 70.241 + movl %eax,UREGS_fs+4(%esp) 70.242 + movl %eax,UREGS_gs+4(%esp) 70.243 nvm86_3:/* 
Rewrite our stack frame and return to ring 1. */ 70.244 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */ 70.245 - andl $0xfffcbeff,XREGS_eflags+4(%esp) 70.246 - movl %gs,XREGS_ss+4(%esp) 70.247 - movl %esi,XREGS_esp+4(%esp) 70.248 + andl $0xfffcbeff,UREGS_eflags+4(%esp) 70.249 + movl %gs,UREGS_ss+4(%esp) 70.250 + movl %esi,UREGS_esp+4(%esp) 70.251 movzwl TRAPBOUNCE_cs(%edx),%eax 70.252 - movl %eax,XREGS_cs+4(%esp) 70.253 + movl %eax,UREGS_cs+4(%esp) 70.254 movl TRAPBOUNCE_eip(%edx),%eax 70.255 - movl %eax,XREGS_eip+4(%esp) 70.256 + movl %eax,UREGS_eip+4(%esp) 70.257 movb $0,TRAPBOUNCE_flags(%edx) 70.258 ret 70.259 .section __ex_table,"a" 70.260 @@ -410,8 +410,8 @@ process_guest_exception_and_events: 70.261 ALIGN 70.262 ENTRY(ret_from_intr) 70.263 GET_CURRENT(%ebx) 70.264 - movl XREGS_eflags(%esp),%eax 70.265 - movb XREGS_cs(%esp),%al 70.266 + movl UREGS_eflags(%esp),%eax 70.267 + movb UREGS_cs(%esp),%al 70.268 testl $(3|X86_EFLAGS_VM),%eax 70.269 jnz test_all_events 70.270 jmp restore_all_xen 70.271 @@ -422,26 +422,26 @@ ENTRY(divide_error) 70.272 error_code: 70.273 SAVE_ALL_NOSEGREGS(a) 70.274 SET_XEN_SEGMENTS(a) 70.275 - testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp) 70.276 + testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp) 70.277 jz exception_with_ints_disabled 70.278 sti # re-enable interrupts 70.279 xorl %eax,%eax 70.280 - movw XREGS_entry_vector(%esp),%ax 70.281 + movw UREGS_entry_vector(%esp),%ax 70.282 movl %esp,%edx 70.283 - pushl %edx # push the xen_regs pointer 70.284 + pushl %edx # push the cpu_user_regs pointer 70.285 GET_CURRENT(%ebx) 70.286 PERFC_INCR(PERFC_exceptions, %eax) 70.287 - call *SYMBOL_NAME(exception_table)(,%eax,4) 70.288 + call *exception_table(,%eax,4) 70.289 addl $4,%esp 70.290 - movl XREGS_eflags(%esp),%eax 70.291 - movb XREGS_cs(%esp),%al 70.292 + movl UREGS_eflags(%esp),%eax 70.293 + movb UREGS_cs(%esp),%al 70.294 testl $(3|X86_EFLAGS_VM),%eax 70.295 jz restore_all_xen 70.296 jmp process_guest_exception_and_events 70.297 70.298 exception_with_ints_disabled: 70.299 - movl XREGS_eflags(%esp),%eax 70.300 - movb XREGS_cs(%esp),%al 70.301 + movl UREGS_eflags(%esp),%eax 70.302 + movb UREGS_cs(%esp),%al 70.303 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen? 70.304 jnz FATAL_exception_with_ints_disabled 70.305 pushl %esp 70.306 @@ -449,23 +449,23 @@ exception_with_ints_disabled: 70.307 addl $4,%esp 70.308 testl %eax,%eax # no fixup code for faulting EIP? 
70.309 jz FATAL_exception_with_ints_disabled 70.310 - movl %eax,XREGS_eip(%esp) 70.311 + movl %eax,UREGS_eip(%esp) 70.312 movl %esp,%esi 70.313 subl $4,%esp 70.314 movl %esp,%edi 70.315 - movl $XREGS_kernel_sizeof/4,%ecx 70.316 + movl $UREGS_kernel_sizeof/4,%ecx 70.317 rep; movsl # make room for error_code/entry_vector 70.318 - movl XREGS_error_code(%esp),%eax # error_code/entry_vector 70.319 - movl %eax,XREGS_kernel_sizeof(%esp) 70.320 + movl UREGS_error_code(%esp),%eax # error_code/entry_vector 70.321 + movl %eax,UREGS_kernel_sizeof(%esp) 70.322 jmp restore_all_xen # return to fixup code 70.323 70.324 FATAL_exception_with_ints_disabled: 70.325 xorl %esi,%esi 70.326 - movw XREGS_entry_vector(%esp),%si 70.327 + movw UREGS_entry_vector(%esp),%si 70.328 movl %esp,%edx 70.329 - pushl %edx # push the xen_regs pointer 70.330 + pushl %edx # push the cpu_user_regs pointer 70.331 pushl %esi # push the trapnr (entry vector) 70.332 - call SYMBOL_NAME(fatal_trap) 70.333 + call fatal_trap 70.334 ud2 70.335 70.336 ENTRY(coprocessor_error) 70.337 @@ -557,8 +557,8 @@ ENTRY(nmi) 70.338 # In all other cases we bail without touching DS-GS, as we have 70.339 # interrupted an enclosing Xen activation in tricky prologue or 70.340 # epilogue code. 70.341 - movl XREGS_eflags(%esp),%eax 70.342 - movb XREGS_cs(%esp),%al 70.343 + movl UREGS_eflags(%esp),%eax 70.344 + movb UREGS_cs(%esp),%al 70.345 testl $(3|X86_EFLAGS_VM),%eax 70.346 jnz do_watchdog_tick 70.347 movl %ds,%eax 70.348 @@ -575,7 +575,7 @@ do_watchdog_tick: 70.349 movl %esp,%edx 70.350 pushl %ebx # reason 70.351 pushl %edx # regs 70.352 - call SYMBOL_NAME(do_nmi) 70.353 + call do_nmi 70.354 addl $8,%esp 70.355 jmp ret_from_intr 70.356 70.357 @@ -595,21 +595,21 @@ nmi_parity_err: 70.358 andb $0xf,%al 70.359 orb $0x4,%al 70.360 outb %al,$0x61 70.361 - cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore 70.362 + cmpb $'i',%ss:opt_nmi # nmi=ignore 70.363 je nmi_out 70.364 - bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason) 70.365 - bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) 70.366 - cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 70.367 + bts $0,%ss:nmi_softirq_reason 70.368 + bts $NMI_SOFTIRQ,%ss:irq_stat 70.369 + cmpb $'d',%ss:opt_nmi # nmi=dom0 70.370 je nmi_out 70.371 movl $(__HYPERVISOR_DS),%edx # nmi=fatal 70.372 movl %edx,%ds 70.373 movl %edx,%es 70.374 movl %esp,%edx 70.375 push %edx 70.376 - call SYMBOL_NAME(mem_parity_error) 70.377 + call mem_parity_error 70.378 addl $4,%esp 70.379 -nmi_out:movl %ss:XREGS_eflags(%esp),%eax 70.380 - movb %ss:XREGS_cs(%esp),%al 70.381 +nmi_out:movl %ss:UREGS_eflags(%esp),%eax 70.382 + movb %ss:UREGS_cs(%esp),%al 70.383 testl $(3|X86_EFLAGS_VM),%eax 70.384 jz restore_all_xen 70.385 movl $(__HYPERVISOR_DS),%edx 70.386 @@ -623,18 +623,18 @@ nmi_io_err: 70.387 andb $0xf,%al 70.388 orb $0x8,%al 70.389 outb %al,$0x61 70.390 - cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore 70.391 + cmpb $'i',%ss:opt_nmi # nmi=ignore 70.392 je nmi_out 70.393 - bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason) 70.394 - bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) 70.395 - cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 70.396 + bts $1,%ss:nmi_softirq_reason 70.397 + bts $NMI_SOFTIRQ,%ss:irq_stat 70.398 + cmpb $'d',%ss:opt_nmi # nmi=dom0 70.399 je nmi_out 70.400 movl $(__HYPERVISOR_DS),%edx # nmi=fatal 70.401 movl %edx,%ds 70.402 movl %edx,%es 70.403 movl %esp,%edx 70.404 push %edx 70.405 - call SYMBOL_NAME(io_check_error) 70.406 + call io_check_error 70.407 addl $4,%esp 70.408 jmp nmi_out 70.409 70.410 @@ -652,32 +652,38 @@ ENTRY(setup_vm86_frame) 70.411 addl 
$16,%esp 70.412 ret 70.413 70.414 +do_arch_sched_op: 70.415 + # Ensure we return success even if we return via schedule_tail() 70.416 + xorl %eax,%eax 70.417 + movl %eax,UREGS_eax+4(%esp) 70.418 + jmp do_sched_op 70.419 + 70.420 do_switch_vm86: 70.421 # Discard the return address 70.422 addl $4,%esp 70.423 70.424 # GS:ESI == Ring-1 stack activation 70.425 - movl XREGS_esp(%esp),%esi 70.426 -VFLT1: movl XREGS_ss(%esp),%gs 70.427 + movl UREGS_esp(%esp),%esi 70.428 +VFLT1: movl UREGS_ss(%esp),%gs 70.429 70.430 # ES:EDI == Ring-0 stack activation 70.431 - leal XREGS_eip(%esp),%edi 70.432 + leal UREGS_eip(%esp),%edi 70.433 70.434 # Restore the hypercall-number-clobbered EAX on our stack frame 70.435 VFLT2: movl %gs:(%esi),%eax 70.436 - movl %eax,XREGS_eax(%esp) 70.437 + movl %eax,UREGS_eax(%esp) 70.438 addl $4,%esi 70.439 70.440 # Copy the VM86 activation from the ring-1 stack to the ring-0 stack 70.441 - movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx 70.442 + movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx 70.443 VFLT3: movl %gs:(%esi),%eax 70.444 stosl 70.445 addl $4,%esi 70.446 loop VFLT3 70.447 70.448 # Fix up EFLAGS: IOPL=0, IF=1, VM=1 70.449 - andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp) 70.450 - orl $X86_EFLAGS_IF|X86_EFLAGS_VM,XREGS_eflags(%esp) 70.451 + andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp) 70.452 + orl $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp) 70.453 70.454 jmp test_all_events 70.455 70.456 @@ -690,55 +696,55 @@ VFLT3: movl %gs:(%esi),%eax 70.457 .data 70.458 70.459 ENTRY(exception_table) 70.460 - .long SYMBOL_NAME(do_divide_error) 70.461 - .long SYMBOL_NAME(do_debug) 70.462 + .long do_divide_error 70.463 + .long do_debug 70.464 .long 0 # nmi 70.465 - .long SYMBOL_NAME(do_int3) 70.466 - .long SYMBOL_NAME(do_overflow) 70.467 - .long SYMBOL_NAME(do_bounds) 70.468 - .long SYMBOL_NAME(do_invalid_op) 70.469 - .long SYMBOL_NAME(math_state_restore) 70.470 + .long do_int3 70.471 + .long do_overflow 70.472 + .long do_bounds 70.473 + .long do_invalid_op 70.474 + .long math_state_restore 70.475 .long 0 # double fault 70.476 - .long SYMBOL_NAME(do_coprocessor_segment_overrun) 70.477 - .long SYMBOL_NAME(do_invalid_TSS) 70.478 - .long SYMBOL_NAME(do_segment_not_present) 70.479 - .long SYMBOL_NAME(do_stack_segment) 70.480 - .long SYMBOL_NAME(do_general_protection) 70.481 - .long SYMBOL_NAME(do_page_fault) 70.482 - .long SYMBOL_NAME(do_spurious_interrupt_bug) 70.483 - .long SYMBOL_NAME(do_coprocessor_error) 70.484 - .long SYMBOL_NAME(do_alignment_check) 70.485 - .long SYMBOL_NAME(do_machine_check) 70.486 - .long SYMBOL_NAME(do_simd_coprocessor_error) 70.487 + .long do_coprocessor_segment_overrun 70.488 + .long do_invalid_TSS 70.489 + .long do_segment_not_present 70.490 + .long do_stack_segment 70.491 + .long do_general_protection 70.492 + .long do_page_fault 70.493 + .long do_spurious_interrupt_bug 70.494 + .long do_coprocessor_error 70.495 + .long do_alignment_check 70.496 + .long do_machine_check 70.497 + .long do_simd_coprocessor_error 70.498 70.499 ENTRY(hypercall_table) 70.500 - .long SYMBOL_NAME(do_set_trap_table) /* 0 */ 70.501 - .long SYMBOL_NAME(do_mmu_update) 70.502 - .long SYMBOL_NAME(do_set_gdt) 70.503 - .long SYMBOL_NAME(do_stack_switch) 70.504 - .long SYMBOL_NAME(do_set_callbacks) 70.505 - .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */ 70.506 - .long SYMBOL_NAME(do_sched_op) 70.507 - .long SYMBOL_NAME(do_dom0_op) 70.508 - .long SYMBOL_NAME(do_set_debugreg) 70.509 - .long SYMBOL_NAME(do_get_debugreg) 70.510 - .long SYMBOL_NAME(do_update_descriptor) /* 10 */ 70.511 - .long 
SYMBOL_NAME(do_set_fast_trap) 70.512 - .long SYMBOL_NAME(do_dom_mem_op) 70.513 - .long SYMBOL_NAME(do_multicall) 70.514 - .long SYMBOL_NAME(do_update_va_mapping) 70.515 - .long SYMBOL_NAME(do_set_timer_op) /* 15 */ 70.516 - .long SYMBOL_NAME(do_event_channel_op) 70.517 - .long SYMBOL_NAME(do_xen_version) 70.518 - .long SYMBOL_NAME(do_console_io) 70.519 - .long SYMBOL_NAME(do_physdev_op) 70.520 - .long SYMBOL_NAME(do_grant_table_op) /* 20 */ 70.521 - .long SYMBOL_NAME(do_vm_assist) 70.522 - .long SYMBOL_NAME(do_update_va_mapping_otherdomain) 70.523 - .long SYMBOL_NAME(do_switch_vm86) 70.524 - .long SYMBOL_NAME(do_boot_vcpu) 70.525 - .long SYMBOL_NAME(do_ni_hypercall) /* 25 */ 70.526 - .long SYMBOL_NAME(do_mmuext_op) 70.527 + .long do_set_trap_table /* 0 */ 70.528 + .long do_mmu_update 70.529 + .long do_set_gdt 70.530 + .long do_stack_switch 70.531 + .long do_set_callbacks 70.532 + .long do_fpu_taskswitch /* 5 */ 70.533 + .long do_arch_sched_op 70.534 + .long do_dom0_op 70.535 + .long do_set_debugreg 70.536 + .long do_get_debugreg 70.537 + .long do_update_descriptor /* 10 */ 70.538 + .long do_set_fast_trap 70.539 + .long do_dom_mem_op 70.540 + .long do_multicall 70.541 + .long do_update_va_mapping 70.542 + .long do_set_timer_op /* 15 */ 70.543 + .long do_event_channel_op 70.544 + .long do_xen_version 70.545 + .long do_console_io 70.546 + .long do_physdev_op 70.547 + .long do_grant_table_op /* 20 */ 70.548 + .long do_vm_assist 70.549 + .long do_update_va_mapping_otherdomain 70.550 + .long do_switch_vm86 70.551 + .long do_boot_vcpu 70.552 + .long do_ni_hypercall /* 25 */ 70.553 + .long do_mmuext_op 70.554 .rept NR_hypercalls-((.-hypercall_table)/4) 70.555 - .long SYMBOL_NAME(do_ni_hypercall) 70.556 + .long do_ni_hypercall 70.557 .endr
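The hypercall path above masks the call number into range and indexes a function-pointer table, then stores the result back into the saved frame (movl %eax,UREGS_eax(%esp)). A minimal C sketch of that dispatch, with the table size, slot assignments, and fallback behaviour assumed for illustration:

    #include <stdio.h>

    #define NR_HYPERCALLS 32   /* assumed power of two; the mask below requires it */

    typedef long (*hypercall_fn_t)(unsigned long arg);

    static long do_ni_hypercall(unsigned long arg) { (void)arg; return -38; /* -ENOSYS */ }
    static long do_sched_op(unsigned long arg)     { (void)arg; return 0; }

    /* Unpopulated slots fall back to do_ni_hypercall, mirroring the .rept fill. */
    static hypercall_fn_t hypercall_table[NR_HYPERCALLS] = {
        [6] = do_sched_op,                 /* slot 6, as in the tables above */
    };

    static long dispatch_hypercall(unsigned int nr, unsigned long arg)
    {
        nr &= NR_HYPERCALLS - 1;           /* andl $(NR_hypercalls-1),%eax */
        hypercall_fn_t fn = hypercall_table[nr];
        return (fn ? fn : do_ni_hypercall)(arg);  /* call *hypercall_table(,%eax,4) */
    }

    int main(void)
    {
        printf("%ld\n", dispatch_hypercall(6, 0));    /* 0: implemented slot        */
        printf("%ld\n", dispatch_hypercall(99, 0));   /* masked to 3: unimplemented */
        return 0;
    }

Masking instead of bounds-checking means any out-of-range number lands on some valid slot, which is why the table is padded to NR_hypercalls entries with do_ni_hypercall.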
71.1 --- a/xen/arch/x86/x86_32/mm.c Thu Apr 28 13:54:01 2005 +0000 71.2 +++ b/xen/arch/x86/x86_32/mm.c Fri Apr 29 07:34:47 2005 +0000 71.3 @@ -188,8 +188,8 @@ long do_stack_switch(unsigned long ss, u 71.4 if ( (ss & 3) != 1 ) 71.5 return -EPERM; 71.6 71.7 - current->arch.kernel_ss = ss; 71.8 - current->arch.kernel_sp = esp; 71.9 + current->arch.guest_context.kernel_ss = ss; 71.10 + current->arch.guest_context.kernel_sp = esp; 71.11 t->ss1 = ss; 71.12 t->esp1 = esp; 71.13
72.1 --- a/xen/arch/x86/x86_32/seg_fixup.c Thu Apr 28 13:54:01 2005 +0000 72.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c Fri Apr 29 07:34:47 2005 +0000 72.3 @@ -115,7 +115,7 @@ int get_baselimit(u16 seg, unsigned long 72.4 if ( ldt ) 72.5 { 72.6 table = (unsigned long *)LDT_VIRT_START(d); 72.7 - if ( idx >= d->arch.ldt_ents ) 72.8 + if ( idx >= d->arch.guest_context.ldt_ents ) 72.9 goto fail; 72.10 } 72.11 else /* gdt */ 72.12 @@ -181,7 +181,7 @@ int fixup_seg(u16 seg, unsigned long off 72.13 if ( ldt ) 72.14 { 72.15 table = (unsigned long *)LDT_VIRT_START(d); 72.16 - if ( idx >= d->arch.ldt_ents ) 72.17 + if ( idx >= d->arch.guest_context.ldt_ents ) 72.18 { 72.19 DPRINTK("Segment %04x out of LDT range (%ld)\n", 72.20 seg, d->arch.ldt_ents); 72.21 @@ -263,7 +263,7 @@ int fixup_seg(u16 seg, unsigned long off 72.22 * Called from the general-protection fault handler to attempt to decode 72.23 * and emulate an instruction that depends on 4GB segments. 72.24 */ 72.25 -int gpf_emulate_4gb(struct xen_regs *regs) 72.26 +int gpf_emulate_4gb(struct cpu_user_regs *regs) 72.27 { 72.28 struct exec_domain *d = current; 72.29 trap_info_t *ti; 72.30 @@ -449,7 +449,7 @@ int gpf_emulate_4gb(struct xen_regs *reg 72.31 /* If requested, give a callback on otherwise unused vector 15. */ 72.32 if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) ) 72.33 { 72.34 - ti = &d->arch.traps[15]; 72.35 + ti = &d->arch.guest_context.trap_ctxt[15]; 72.36 tb = &d->arch.trap_bounce; 72.37 tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; 72.38 tb->error_code = pb - eip;
73.1 --- a/xen/arch/x86/x86_32/traps.c Thu Apr 28 13:54:01 2005 +0000 73.2 +++ b/xen/arch/x86/x86_32/traps.c Fri Apr 29 07:34:47 2005 +0000 73.3 @@ -29,9 +29,10 @@ static inline int kernel_text_address(un 73.4 void show_guest_stack(void) 73.5 { 73.6 int i; 73.7 - execution_context_t *ec = get_execution_context(); 73.8 - unsigned long *stack = (unsigned long *)ec->esp; 73.9 - printk("Guest EIP is %08x\n ", ec->eip); 73.10 + struct cpu_user_regs *regs = get_cpu_user_regs(); 73.11 + unsigned long *stack = (unsigned long *)regs->esp; 73.12 + 73.13 + printk("Guest EIP is %08x\n ", regs->eip); 73.14 73.15 for ( i = 0; i < kstack_depth_to_print; i++ ) 73.16 { 73.17 @@ -89,7 +90,7 @@ void show_stack(unsigned long *esp) 73.18 show_trace( esp ); 73.19 } 73.20 73.21 -void show_registers(struct xen_regs *regs) 73.22 +void show_registers(struct cpu_user_regs *regs) 73.23 { 73.24 unsigned long ss, ds, es, fs, gs, cs; 73.25 unsigned long eip, esp, eflags; 73.26 @@ -215,9 +216,9 @@ asmlinkage void do_double_fault(void) 73.27 } 73.28 73.29 BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi) 73.30 -asmlinkage void smp_deferred_nmi(struct xen_regs regs) 73.31 +asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs) 73.32 { 73.33 - asmlinkage void do_nmi(struct xen_regs *, unsigned long); 73.34 + asmlinkage void do_nmi(struct cpu_user_regs *, unsigned long); 73.35 ack_APIC_irq(); 73.36 do_nmi(®s, 0); 73.37 } 73.38 @@ -280,7 +281,7 @@ long set_fast_trap(struct exec_domain *p 73.39 if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 73.40 return -1; 73.41 73.42 - ti = p->arch.traps + idx; 73.43 + ti = &p->arch.guest_context.trap_ctxt[idx]; 73.44 73.45 /* 73.46 * We can't virtualise interrupt gates, as there's no way to get 73.47 @@ -292,7 +293,7 @@ long set_fast_trap(struct exec_domain *p 73.48 if ( p == current ) 73.49 CLEAR_FAST_TRAP(&p->arch); 73.50 73.51 - p->arch.fast_trap_idx = idx; 73.52 + p->arch.guest_context.fast_trap_idx = idx; 73.53 p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff); 73.54 p->arch.fast_trap_desc.b = 73.55 (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13; 73.56 @@ -319,10 +320,10 @@ long do_set_callbacks(unsigned long even 73.57 if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) ) 73.58 return -EPERM; 73.59 73.60 - d->arch.event_selector = event_selector; 73.61 - d->arch.event_address = event_address; 73.62 - d->arch.failsafe_selector = failsafe_selector; 73.63 - d->arch.failsafe_address = failsafe_address; 73.64 + d->arch.guest_context.event_callback_cs = event_selector; 73.65 + d->arch.guest_context.event_callback_eip = event_address; 73.66 + d->arch.guest_context.failsafe_callback_cs = failsafe_selector; 73.67 + d->arch.guest_context.failsafe_callback_eip = failsafe_address; 73.68 73.69 return 0; 73.70 }
74.1 --- a/xen/arch/x86/x86_64/asm-offsets.c Thu Apr 28 13:54:01 2005 +0000 74.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c Fri Apr 29 07:34:47 2005 +0000 74.3 @@ -24,40 +24,44 @@ 74.4 74.5 void __dummy__(void) 74.6 { 74.7 - OFFSET(XREGS_r15, struct xen_regs, r15); 74.8 - OFFSET(XREGS_r14, struct xen_regs, r14); 74.9 - OFFSET(XREGS_r13, struct xen_regs, r13); 74.10 - OFFSET(XREGS_r12, struct xen_regs, r12); 74.11 - OFFSET(XREGS_rbp, struct xen_regs, rbp); 74.12 - OFFSET(XREGS_rbx, struct xen_regs, rbx); 74.13 - OFFSET(XREGS_r11, struct xen_regs, r11); 74.14 - OFFSET(XREGS_r10, struct xen_regs, r10); 74.15 - OFFSET(XREGS_r9, struct xen_regs, r9); 74.16 - OFFSET(XREGS_r8, struct xen_regs, r8); 74.17 - OFFSET(XREGS_rax, struct xen_regs, rax); 74.18 - OFFSET(XREGS_rcx, struct xen_regs, rcx); 74.19 - OFFSET(XREGS_rdx, struct xen_regs, rdx); 74.20 - OFFSET(XREGS_rsi, struct xen_regs, rsi); 74.21 - OFFSET(XREGS_rdi, struct xen_regs, rdi); 74.22 - OFFSET(XREGS_error_code, struct xen_regs, error_code); 74.23 - OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector); 74.24 - OFFSET(XREGS_rip, struct xen_regs, rip); 74.25 - OFFSET(XREGS_cs, struct xen_regs, cs); 74.26 - OFFSET(XREGS_eflags, struct xen_regs, eflags); 74.27 - OFFSET(XREGS_rsp, struct xen_regs, rsp); 74.28 - OFFSET(XREGS_ss, struct xen_regs, ss); 74.29 - OFFSET(XREGS_kernel_sizeof, struct xen_regs, es); 74.30 - DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs)); 74.31 + OFFSET(UREGS_r15, struct cpu_user_regs, r15); 74.32 + OFFSET(UREGS_r14, struct cpu_user_regs, r14); 74.33 + OFFSET(UREGS_r13, struct cpu_user_regs, r13); 74.34 + OFFSET(UREGS_r12, struct cpu_user_regs, r12); 74.35 + OFFSET(UREGS_rbp, struct cpu_user_regs, rbp); 74.36 + OFFSET(UREGS_rbx, struct cpu_user_regs, rbx); 74.37 + OFFSET(UREGS_r11, struct cpu_user_regs, r11); 74.38 + OFFSET(UREGS_r10, struct cpu_user_regs, r10); 74.39 + OFFSET(UREGS_r9, struct cpu_user_regs, r9); 74.40 + OFFSET(UREGS_r8, struct cpu_user_regs, r8); 74.41 + OFFSET(UREGS_rax, struct cpu_user_regs, rax); 74.42 + OFFSET(UREGS_rcx, struct cpu_user_regs, rcx); 74.43 + OFFSET(UREGS_rdx, struct cpu_user_regs, rdx); 74.44 + OFFSET(UREGS_rsi, struct cpu_user_regs, rsi); 74.45 + OFFSET(UREGS_rdi, struct cpu_user_regs, rdi); 74.46 + OFFSET(UREGS_error_code, struct cpu_user_regs, error_code); 74.47 + OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector); 74.48 + OFFSET(UREGS_rip, struct cpu_user_regs, rip); 74.49 + OFFSET(UREGS_cs, struct cpu_user_regs, cs); 74.50 + OFFSET(UREGS_eflags, struct cpu_user_regs, eflags); 74.51 + OFFSET(UREGS_rsp, struct cpu_user_regs, rsp); 74.52 + OFFSET(UREGS_ss, struct cpu_user_regs, ss); 74.53 + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es); 74.54 + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); 74.55 BLANK(); 74.56 74.57 OFFSET(EDOMAIN_processor, struct exec_domain, processor); 74.58 OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info); 74.59 - OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address); 74.60 - OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address); 74.61 - OFFSET(EDOMAIN_syscall_addr, struct exec_domain, arch.syscall_address); 74.62 OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce); 74.63 OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags); 74.64 - OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp); 74.65 + OFFSET(EDOMAIN_event_addr, struct exec_domain, 74.66 + arch.guest_context.event_callback_eip); 74.67 + OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, 74.68 + 
arch.guest_context.failsafe_callback_eip); 74.69 + OFFSET(EDOMAIN_syscall_addr, struct exec_domain, 74.70 + arch.guest_context.syscall_callback_eip); 74.71 + OFFSET(EDOMAIN_kernel_sp, struct exec_domain, 74.72 + arch.guest_context.kernel_sp); 74.73 BLANK(); 74.74 74.75 OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
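asm-offsets.c is never linked into Xen; it is compiled to assembly and scraped for the UREGS_*/EDOMAIN_* constants the entry code uses. A self-contained sketch of the OFFSET/DEFINE trick, with the marker format and the scraping step assumed:

    #include <stddef.h>

    struct cpu_user_regs { unsigned long r15, rip, rsp; };   /* abridged */

    /* Emit "->SYM value" markers into the generated .s file; a build
       script (assumed here) turns each marker into "#define SYM value". */
    #define DEFINE(sym, val) \
        __asm__ __volatile__ ( "\n->" #sym " %0" : : "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

    void __dummy__(void)
    {
        OFFSET(UREGS_rip, struct cpu_user_regs, rip);
        DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    }

Because the offsets are regenerated from the C structs at build time, a rename like xen_regs to cpu_user_regs only has to touch this one file plus the assembly that consumes the constants, which is exactly what the hunks above do.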
75.1 --- a/xen/arch/x86/x86_64/entry.S Thu Apr 28 13:54:01 2005 +0000 75.2 +++ b/xen/arch/x86/x86_64/entry.S Fri Apr 29 07:34:47 2005 +0000 75.3 @@ -120,10 +120,10 @@ ENTRY(syscall_enter) 75.4 sti 75.5 movq %r10,%rcx 75.6 andq $(NR_hypercalls-1),%rax 75.7 - leaq SYMBOL_NAME(hypercall_table)(%rip),%r10 75.8 + leaq hypercall_table(%rip),%r10 75.9 PERFC_INCR(PERFC_hypercalls, %rax) 75.10 callq *(%r10,%rax,8) 75.11 - movq %rax,XREGS_rax(%rsp) # save the return value 75.12 + movq %rax,UREGS_rax(%rsp) # save the return value 75.13 75.14 /* %rbx: struct exec_domain */ 75.15 test_all_events: 75.16 @@ -131,7 +131,7 @@ test_all_events: 75.17 /*test_softirqs:*/ 75.18 movl EDOMAIN_processor(%rbx),%eax 75.19 shl $IRQSTAT_shift,%rax 75.20 - leaq SYMBOL_NAME(irq_stat)(%rip),%rcx 75.21 + leaq irq_stat(%rip),%rcx 75.22 testl $~0,(%rcx,%rax,1) 75.23 jnz process_softirqs 75.24 /*test_guest_events:*/ 75.25 @@ -160,10 +160,7 @@ test_all_events: 75.26 * and we set it to the fixed value. 75.27 * 75.28 * We also need the room, especially because orig_eax field is used 75.29 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following: 75.30 - * (13) u64 gs_base_user; 75.31 - * (12) u64 gs_base_kernel; 75.32 - * (11) u64 fs_base; 75.33 + * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following: 75.34 * (10) u64 gs; 75.35 * (9) u64 fs; 75.36 * (8) u64 ds; 75.37 @@ -176,9 +173,6 @@ test_all_events: 75.38 * (2) u64 rip; 75.39 * (2/1) u32 entry_vector; 75.40 * (1/1) u32 error_code; 75.41 - * However, get_stack_bottom() actually returns 64 bytes before the real 75.42 - * bottom of the stack to allow space for: 75.43 - * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers. 75.44 */ 75.45 #define VMX_MONITOR_RFLAGS 0x202 /* IF on */ 75.46 #define NR_SKIPPED_REGS 6 /* See the above explanation */ 75.47 @@ -205,7 +199,7 @@ test_all_events: 75.48 ENTRY(vmx_asm_vmexit_handler) 75.49 /* selectors are restored/saved by VMX */ 75.50 VMX_SAVE_ALL_NOSEGREGS 75.51 - call SYMBOL_NAME(vmx_vmexit_handler) 75.52 + call vmx_vmexit_handler 75.53 jmp vmx_asm_do_resume 75.54 75.55 ENTRY(vmx_asm_do_launch) 75.56 @@ -228,7 +222,7 @@ ENTRY(vmx_asm_do_launch) 75.57 /* VMLUANCH */ 75.58 .byte 0x0f,0x01,0xc2 75.59 pushfq 75.60 - call SYMBOL_NAME(vm_launch_fail) 75.61 + call vm_launch_fail 75.62 hlt 75.63 75.64 ALIGN 75.65 @@ -241,12 +235,12 @@ vmx_test_all_events: 75.66 /*test_softirqs:*/ 75.67 movl EDOMAIN_processor(%rbx),%eax 75.68 shl $IRQSTAT_shift,%rax 75.69 - leaq SYMBOL_NAME(irq_stat)(%rip), %rdx 75.70 + leaq irq_stat(%rip), %rdx 75.71 testl $~0,(%rdx,%rax,1) 75.72 jnz vmx_process_softirqs 75.73 75.74 vmx_restore_all_guest: 75.75 - call SYMBOL_NAME(load_cr2) 75.76 + call load_cr2 75.77 /* 75.78 * Check if we are going back to VMX-based VM 75.79 * By this time, all the setups in the VMCS must be complete. 
75.80 @@ -270,14 +264,14 @@ vmx_restore_all_guest: 75.81 /* VMRESUME */ 75.82 .byte 0x0f,0x01,0xc3 75.83 pushfq 75.84 - call SYMBOL_NAME(vm_resume_fail) 75.85 + call vm_resume_fail 75.86 /* Should never reach here */ 75.87 hlt 75.88 75.89 ALIGN 75.90 vmx_process_softirqs: 75.91 sti 75.92 - call SYMBOL_NAME(do_softirq) 75.93 + call do_softirq 75.94 jmp vmx_test_all_events 75.95 #endif 75.96 75.97 @@ -285,7 +279,7 @@ vmx_process_softirqs: 75.98 /* %rbx: struct exec_domain */ 75.99 process_softirqs: 75.100 sti 75.101 - call SYMBOL_NAME(do_softirq) 75.102 + call do_softirq 75.103 jmp test_all_events 75.104 75.105 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */ 75.106 @@ -298,13 +292,13 @@ create_bounce_frame: 75.107 /* Push new frame at registered guest-OS stack base. */ 75.108 pushq %rdx 75.109 movq %rbx,%rdi 75.110 - call SYMBOL_NAME(toggle_guest_mode) 75.111 + call toggle_guest_mode 75.112 popq %rdx 75.113 movq EDOMAIN_kernel_sp(%rbx),%rsi 75.114 jmp 2f 75.115 1: /* In kernel context already: push new frame at existing %rsp. */ 75.116 - movq XREGS_rsp+8(%rsp),%rsi 75.117 - andb $0xfc,XREGS_cs+8(%rsp) # Indicate kernel context to guest. 75.118 + movq UREGS_rsp+8(%rsp),%rsi 75.119 + andb $0xfc,UREGS_cs+8(%rsp) # Indicate kernel context to guest. 75.120 2: movq $HYPERVISOR_VIRT_START,%rax 75.121 cmpq %rax,%rsi 75.122 jb 1f # In +ve address space? Then okay. 75.123 @@ -312,15 +306,15 @@ 2: movq $HYPERVISOR_VIRT_START,%ra 75.124 cmpq %rax,%rsi 75.125 jb domain_crash_synchronous # Above Xen private area? Then okay. 75.126 1: subq $40,%rsi 75.127 - movq XREGS_ss+8(%rsp),%rax 75.128 + movq UREGS_ss+8(%rsp),%rax 75.129 FLT2: movq %rax,32(%rsi) # SS 75.130 - movq XREGS_rsp+8(%rsp),%rax 75.131 + movq UREGS_rsp+8(%rsp),%rax 75.132 FLT3: movq %rax,24(%rsi) # RSP 75.133 - movq XREGS_eflags+8(%rsp),%rax 75.134 + movq UREGS_eflags+8(%rsp),%rax 75.135 FLT4: movq %rax,16(%rsi) # RFLAGS 75.136 - movq XREGS_cs+8(%rsp),%rax 75.137 + movq UREGS_cs+8(%rsp),%rax 75.138 FLT5: movq %rax,8(%rsi) # CS 75.139 - movq XREGS_rip+8(%rsp),%rax 75.140 + movq UREGS_rip+8(%rsp),%rax 75.141 FLT6: movq %rax,(%rsi) # RIP 75.142 movb TRAPBOUNCE_flags(%rdx),%cl 75.143 testb $TBF_EXCEPTION_ERRCODE,%cl 75.144 @@ -345,19 +339,19 @@ FLT11: movq %rax,8(%rsi) 75.145 movl %ds,%eax 75.146 FLT12: movq %rax,(%rsi) # DS 75.147 2: subq $16,%rsi 75.148 - movq XREGS_r11+8(%rsp),%rax 75.149 + movq UREGS_r11+8(%rsp),%rax 75.150 FLT13: movq %rax,8(%rsi) # R11 75.151 - movq XREGS_rcx+8(%rsp),%rax 75.152 + movq UREGS_rcx+8(%rsp),%rax 75.153 FLT14: movq %rax,(%rsi) # RCX 75.154 /* Rewrite our stack frame and return to guest-OS mode. */ 75.155 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */ 75.156 - movq $TRAP_syscall,XREGS_entry_vector+8(%rsp) 75.157 - andl $0xfffcbeff,XREGS_eflags+8(%rsp) 75.158 - movq $__GUEST_SS,XREGS_ss+8(%rsp) 75.159 - movq %rsi,XREGS_rsp+8(%rsp) 75.160 - movq $__GUEST_CS,XREGS_cs+8(%rsp) 75.161 + movq $TRAP_syscall,UREGS_entry_vector+8(%rsp) 75.162 + andl $0xfffcbeff,UREGS_eflags+8(%rsp) 75.163 + movq $__GUEST_SS,UREGS_ss+8(%rsp) 75.164 + movq %rsi,UREGS_rsp+8(%rsp) 75.165 + movq $__GUEST_CS,UREGS_cs+8(%rsp) 75.166 movq TRAPBOUNCE_eip(%rdx),%rax 75.167 - movq %rax,XREGS_rip+8(%rsp) 75.168 + movq %rax,UREGS_rip+8(%rsp) 75.169 movb $0,TRAPBOUNCE_flags(%rdx) 75.170 ret 75.171 .section __ex_table,"a" 75.172 @@ -383,7 +377,7 @@ process_guest_exception_and_events: 75.173 /* No special register assumptions. 
*/ 75.174 ENTRY(ret_from_intr) 75.175 GET_CURRENT(%rbx) 75.176 - testb $3,XREGS_cs(%rsp) 75.177 + testb $3,UREGS_cs(%rsp) 75.178 jnz test_all_events 75.179 jmp restore_all_xen 75.180 75.181 @@ -391,45 +385,45 @@ ENTRY(ret_from_intr) 75.182 /* No special register assumptions. */ 75.183 error_code: 75.184 SAVE_ALL 75.185 - testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp) 75.186 + testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp) 75.187 jz exception_with_ints_disabled 75.188 sti 75.189 movq %rsp,%rdi 75.190 - movl XREGS_entry_vector(%rsp),%eax 75.191 - leaq SYMBOL_NAME(exception_table)(%rip),%rdx 75.192 + movl UREGS_entry_vector(%rsp),%eax 75.193 + leaq exception_table(%rip),%rdx 75.194 GET_CURRENT(%rbx) 75.195 PERFC_INCR(PERFC_exceptions, %rax) 75.196 callq *(%rdx,%rax,8) 75.197 - testb $3,XREGS_cs(%rsp) 75.198 + testb $3,UREGS_cs(%rsp) 75.199 jz restore_all_xen 75.200 jmp process_guest_exception_and_events 75.201 75.202 /* No special register assumptions. */ 75.203 exception_with_ints_disabled: 75.204 - testb $3,XREGS_cs(%rsp) # interrupts disabled outside Xen? 75.205 + testb $3,UREGS_cs(%rsp) # interrupts disabled outside Xen? 75.206 jnz FATAL_exception_with_ints_disabled 75.207 movq %rsp,%rdi 75.208 call search_pre_exception_table 75.209 testq %rax,%rax # no fixup code for faulting EIP? 75.210 jz FATAL_exception_with_ints_disabled 75.211 - movq %rax,XREGS_rip(%rsp) 75.212 - subq $8,XREGS_rsp(%rsp) # add ec/ev to previous stack frame 75.213 - testb $15,XREGS_rsp(%rsp) # return %rsp is now aligned? 75.214 + movq %rax,UREGS_rip(%rsp) 75.215 + subq $8,UREGS_rsp(%rsp) # add ec/ev to previous stack frame 75.216 + testb $15,UREGS_rsp(%rsp) # return %rsp is now aligned? 75.217 jz 1f # then there is a pad quadword already 75.218 movq %rsp,%rsi 75.219 subq $8,%rsp 75.220 movq %rsp,%rdi 75.221 - movq $XREGS_kernel_sizeof/8,%rcx 75.222 + movq $UREGS_kernel_sizeof/8,%rcx 75.223 rep; movsq # make room for ec/ev 75.224 -1: movq XREGS_error_code(%rsp),%rax # ec/ev 75.225 - movq %rax,XREGS_kernel_sizeof(%rsp) 75.226 +1: movq UREGS_error_code(%rsp),%rax # ec/ev 75.227 + movq %rax,UREGS_kernel_sizeof(%rsp) 75.228 jmp restore_all_xen # return to fixup code 75.229 75.230 /* No special register assumptions. 
*/ 75.231 FATAL_exception_with_ints_disabled: 75.232 - movl XREGS_entry_vector(%rsp),%edi 75.233 + movl UREGS_entry_vector(%rsp),%edi 75.234 movq %rsp,%rsi 75.235 - call SYMBOL_NAME(fatal_trap) 75.236 + call fatal_trap 75.237 ud2 75.238 75.239 ENTRY(divide_error) 75.240 @@ -526,61 +520,67 @@ ENTRY(nmi) 75.241 inb $0x61,%al 75.242 movl %eax,%esi # reason 75.243 movq %rsp,%rdi # regs 75.244 - call SYMBOL_NAME(do_nmi) 75.245 + call do_nmi 75.246 jmp restore_all_xen 75.247 75.248 +do_arch_sched_op: 75.249 + # Ensure we return success even if we return via schedule_tail() 75.250 + xorl %eax,%eax 75.251 + movq %rax,UREGS_rax+8(%rsp) 75.252 + jmp do_sched_op 75.253 + 75.254 .data 75.255 75.256 ENTRY(exception_table) 75.257 - .quad SYMBOL_NAME(do_divide_error) 75.258 - .quad SYMBOL_NAME(do_debug) 75.259 + .quad do_divide_error 75.260 + .quad do_debug 75.261 .quad 0 # nmi 75.262 - .quad SYMBOL_NAME(do_int3) 75.263 - .quad SYMBOL_NAME(do_overflow) 75.264 - .quad SYMBOL_NAME(do_bounds) 75.265 - .quad SYMBOL_NAME(do_invalid_op) 75.266 - .quad SYMBOL_NAME(math_state_restore) 75.267 - .quad SYMBOL_NAME(do_double_fault) 75.268 - .quad SYMBOL_NAME(do_coprocessor_segment_overrun) 75.269 - .quad SYMBOL_NAME(do_invalid_TSS) 75.270 - .quad SYMBOL_NAME(do_segment_not_present) 75.271 - .quad SYMBOL_NAME(do_stack_segment) 75.272 - .quad SYMBOL_NAME(do_general_protection) 75.273 - .quad SYMBOL_NAME(do_page_fault) 75.274 - .quad SYMBOL_NAME(do_spurious_interrupt_bug) 75.275 - .quad SYMBOL_NAME(do_coprocessor_error) 75.276 - .quad SYMBOL_NAME(do_alignment_check) 75.277 - .quad SYMBOL_NAME(do_machine_check) 75.278 - .quad SYMBOL_NAME(do_simd_coprocessor_error) 75.279 + .quad do_int3 75.280 + .quad do_overflow 75.281 + .quad do_bounds 75.282 + .quad do_invalid_op 75.283 + .quad math_state_restore 75.284 + .quad do_double_fault 75.285 + .quad do_coprocessor_segment_overrun 75.286 + .quad do_invalid_TSS 75.287 + .quad do_segment_not_present 75.288 + .quad do_stack_segment 75.289 + .quad do_general_protection 75.290 + .quad do_page_fault 75.291 + .quad do_spurious_interrupt_bug 75.292 + .quad do_coprocessor_error 75.293 + .quad do_alignment_check 75.294 + .quad do_machine_check 75.295 + .quad do_simd_coprocessor_error 75.296 75.297 ENTRY(hypercall_table) 75.298 - .quad SYMBOL_NAME(do_set_trap_table) /* 0 */ 75.299 - .quad SYMBOL_NAME(do_mmu_update) 75.300 - .quad SYMBOL_NAME(do_set_gdt) 75.301 - .quad SYMBOL_NAME(do_stack_switch) 75.302 - .quad SYMBOL_NAME(do_set_callbacks) 75.303 - .quad SYMBOL_NAME(do_fpu_taskswitch) /* 5 */ 75.304 - .quad SYMBOL_NAME(do_sched_op) 75.305 - .quad SYMBOL_NAME(do_dom0_op) 75.306 - .quad SYMBOL_NAME(do_set_debugreg) 75.307 - .quad SYMBOL_NAME(do_get_debugreg) 75.308 - .quad SYMBOL_NAME(do_update_descriptor) /* 10 */ 75.309 - .quad SYMBOL_NAME(do_ni_hypercall) 75.310 - .quad SYMBOL_NAME(do_dom_mem_op) 75.311 - .quad SYMBOL_NAME(do_multicall) 75.312 - .quad SYMBOL_NAME(do_update_va_mapping) 75.313 - .quad SYMBOL_NAME(do_set_timer_op) /* 15 */ 75.314 - .quad SYMBOL_NAME(do_event_channel_op) 75.315 - .quad SYMBOL_NAME(do_xen_version) 75.316 - .quad SYMBOL_NAME(do_console_io) 75.317 - .quad SYMBOL_NAME(do_physdev_op) 75.318 - .quad SYMBOL_NAME(do_grant_table_op) /* 20 */ 75.319 - .quad SYMBOL_NAME(do_vm_assist) 75.320 - .quad SYMBOL_NAME(do_update_va_mapping_otherdomain) 75.321 - .quad SYMBOL_NAME(do_switch_to_user) 75.322 - .quad SYMBOL_NAME(do_boot_vcpu) 75.323 - .quad SYMBOL_NAME(do_set_segment_base) /* 25 */ 75.324 - .quad SYMBOL_NAME(do_mmuext_op) 75.325 + .quad do_set_trap_table /* 0 */ 
75.326 + .quad do_mmu_update 75.327 + .quad do_set_gdt 75.328 + .quad do_stack_switch 75.329 + .quad do_set_callbacks 75.330 + .quad do_fpu_taskswitch /* 5 */ 75.331 + .quad do_arch_sched_op 75.332 + .quad do_dom0_op 75.333 + .quad do_set_debugreg 75.334 + .quad do_get_debugreg 75.335 + .quad do_update_descriptor /* 10 */ 75.336 + .quad do_ni_hypercall 75.337 + .quad do_dom_mem_op 75.338 + .quad do_multicall 75.339 + .quad do_update_va_mapping 75.340 + .quad do_set_timer_op /* 15 */ 75.341 + .quad do_event_channel_op 75.342 + .quad do_xen_version 75.343 + .quad do_console_io 75.344 + .quad do_physdev_op 75.345 + .quad do_grant_table_op /* 20 */ 75.346 + .quad do_vm_assist 75.347 + .quad do_update_va_mapping_otherdomain 75.348 + .quad do_switch_to_user 75.349 + .quad do_boot_vcpu 75.350 + .quad do_set_segment_base /* 25 */ 75.351 + .quad do_mmuext_op 75.352 .rept NR_hypercalls-((.-hypercall_table)/4) 75.353 - .quad SYMBOL_NAME(do_ni_hypercall) 75.354 + .quad do_ni_hypercall 75.355 .endr
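Both entry.S files above grow a do_arch_sched_op wrapper that pre-stores a success value into the saved guest register frame before jumping to do_sched_op: if the call deschedules the vcpu and it later resumes via schedule_tail(), the normal "store the return value" instruction after the call never runs. A C rendering of the idea, with the frame layout and stub return values assumed:

    #include <stdio.h>

    struct cpu_user_regs { long rax; };   /* abridged saved frame */

    static long do_sched_op(unsigned long op) { (void)op; return 0; }

    static long do_arch_sched_op(unsigned long op, struct cpu_user_regs *saved)
    {
        saved->rax = 0;          /* pre-store success into the saved frame */
        return do_sched_op(op);  /* in Xen this may "return" via schedule_tail() */
    }

    int main(void)
    {
        struct cpu_user_regs frame = { .rax = -1 };
        printf("%ld %ld\n", do_arch_sched_op(0, &frame), frame.rax);  /* 0 0 */
        return 0;
    }

On the normal path the later store overwrites the pre-stored zero with the real return value, so the wrapper only changes behaviour on the tail-resume path.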
76.1 --- a/xen/arch/x86/x86_64/mm.c Thu Apr 28 13:54:01 2005 +0000 76.2 +++ b/xen/arch/x86/x86_64/mm.c Fri Apr 29 07:34:47 2005 +0000 76.3 @@ -240,8 +240,8 @@ long do_stack_switch(unsigned long ss, u 76.4 { 76.5 if ( (ss & 3) != 3 ) 76.6 return -EPERM; 76.7 - current->arch.kernel_ss = ss; 76.8 - current->arch.kernel_sp = esp; 76.9 + current->arch.guest_context.kernel_ss = ss; 76.10 + current->arch.guest_context.kernel_sp = esp; 76.11 return 0; 76.12 } 76.13 76.14 @@ -253,21 +253,24 @@ long do_set_segment_base(unsigned int wh 76.15 switch ( which ) 76.16 { 76.17 case SEGBASE_FS: 76.18 - ed->arch.user_ctxt.fs_base = base; 76.19 if ( wrmsr_user(MSR_FS_BASE, base, base>>32) ) 76.20 ret = -EFAULT; 76.21 + else 76.22 + ed->arch.guest_context.fs_base = base; 76.23 break; 76.24 76.25 case SEGBASE_GS_USER: 76.26 - ed->arch.user_ctxt.gs_base_user = base; 76.27 if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) ) 76.28 ret = -EFAULT; 76.29 + else 76.30 + ed->arch.guest_context.gs_base_user = base; 76.31 break; 76.32 76.33 case SEGBASE_GS_KERNEL: 76.34 - ed->arch.user_ctxt.gs_base_kernel = base; 76.35 if ( wrmsr_user(MSR_GS_BASE, base, base>>32) ) 76.36 ret = -EFAULT; 76.37 + else 76.38 + ed->arch.guest_context.gs_base_kernel = base; 76.39 break; 76.40 76.41 case SEGBASE_GS_USER_SEL:
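The do_set_segment_base() hunk above is more than a rename: the cached copy in guest_context is now written only after wrmsr_user() succeeds, so a faulting MSR write can no longer leave stale cached state for later context switches to replay. A standalone sketch of the ordering, where the fault condition is invented for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define MSR_FS_BASE 0xc0000100u
    #define EFAULT 14

    struct vcpu_guest_context { uint64_t fs_base; };
    struct exec_domain { struct { struct vcpu_guest_context guest_context; } arch; };

    /* Stand-in for Xen's wrmsr_user(): nonzero means the write faulted.
       Pretending nonzero high halves fault is purely for illustration. */
    static int wrmsr_user(uint32_t msr, uint32_t lo, uint32_t hi)
    {
        (void)msr; (void)lo;
        return hi != 0;
    }

    static long set_fs_base(struct exec_domain *ed, uint64_t base)
    {
        if ( wrmsr_user(MSR_FS_BASE, (uint32_t)base, (uint32_t)(base >> 32)) )
            return -EFAULT;                          /* cached copy untouched */
        ed->arch.guest_context.fs_base = base;       /* commit only on success */
        return 0;
    }

    int main(void)
    {
        struct exec_domain ed = { { { 0 } } };
        printf("%ld fs_base=%llx\n", set_fs_base(&ed, 0x1000),
               (unsigned long long)ed.arch.guest_context.fs_base);
        printf("%ld fs_base=%llx\n", set_fs_base(&ed, 0x123400000000ull),
               (unsigned long long)ed.arch.guest_context.fs_base);  /* unchanged */
        return 0;
    }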
77.1 --- a/xen/arch/x86/x86_64/traps.c Thu Apr 28 13:54:01 2005 +0000 77.2 +++ b/xen/arch/x86/x86_64/traps.c Fri Apr 29 07:34:47 2005 +0000 77.3 @@ -24,9 +24,10 @@ static inline int kernel_text_address(un 77.4 void show_guest_stack(void) 77.5 { 77.6 int i; 77.7 - execution_context_t *ec = get_execution_context(); 77.8 - unsigned long *stack = (unsigned long *)ec->rsp; 77.9 - printk("Guest RIP is %016lx\n ", ec->rip); 77.10 + struct cpu_user_regs *regs = get_cpu_user_regs(); 77.11 + unsigned long *stack = (unsigned long *)regs->rsp; 77.12 + 77.13 + printk("Guest RIP is %016lx\n ", regs->rip); 77.14 77.15 for ( i = 0; i < kstack_depth_to_print; i++ ) 77.16 { 77.17 @@ -84,7 +85,7 @@ void show_stack(unsigned long *rsp) 77.18 show_trace(rsp); 77.19 } 77.20 77.21 -void show_registers(struct xen_regs *regs) 77.22 +void show_registers(struct cpu_user_regs *regs) 77.23 { 77.24 printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n", 77.25 smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags); 77.26 @@ -130,7 +131,7 @@ void show_page_walk(unsigned long addr) 77.27 } 77.28 77.29 asmlinkage void double_fault(void); 77.30 -asmlinkage void do_double_fault(struct xen_regs *regs) 77.31 +asmlinkage void do_double_fault(struct cpu_user_regs *regs) 77.32 { 77.33 /* Disable the NMI watchdog. It's useless now. */ 77.34 watchdog_on = 0; 77.35 @@ -254,9 +255,9 @@ long do_set_callbacks(unsigned long even 77.36 { 77.37 struct exec_domain *d = current; 77.38 77.39 - d->arch.event_address = event_address; 77.40 - d->arch.failsafe_address = failsafe_address; 77.41 - d->arch.syscall_address = syscall_address; 77.42 + d->arch.guest_context.event_callback_eip = event_address; 77.43 + d->arch.guest_context.failsafe_callback_eip = failsafe_address; 77.44 + d->arch.guest_context.syscall_callback_eip = syscall_address; 77.45 77.46 return 0; 77.47 }
78.1 --- a/xen/arch/x86/x86_emulate.c Thu Apr 28 13:54:01 2005 +0000 78.2 +++ b/xen/arch/x86/x86_emulate.c Fri Apr 29 07:34:47 2005 +0000 78.3 @@ -377,7 +377,7 @@ do{ __asm__ __volatile__ ( 78.4 78.5 void * 78.6 decode_register( 78.7 - u8 modrm_reg, struct xen_regs *regs, int highbyte_regs) 78.8 + u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs) 78.9 { 78.10 void *p; 78.11 78.12 @@ -417,7 +417,7 @@ decode_register( 78.13 78.14 int 78.15 x86_emulate_memop( 78.16 - struct xen_regs *regs, 78.17 + struct cpu_user_regs *regs, 78.18 unsigned long cr2, 78.19 struct x86_mem_emulator *ops, 78.20 int mode) 78.21 @@ -430,7 +430,7 @@ x86_emulate_memop( 78.22 struct operand src, dst; 78.23 78.24 /* Shadow copy of register state. Committed on successful emulation. */ 78.25 - struct xen_regs _regs = *regs; 78.26 + struct cpu_user_regs _regs = *regs; 78.27 78.28 /* Legacy prefixes. */ 78.29 for ( i = 0; i < 8; i++ )
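The x86_emulate_memop() hunk above shows the emulator's shadow-copy discipline: it mutates a local copy of the register file and commits it back only if decode and execute succeed, so a failed emulation leaves guest state untouched. A reduced sketch, with the toy "instruction" and failure flag assumed:

    #include <stdbool.h>
    #include <stdio.h>

    struct cpu_user_regs { unsigned long eip, eax; };

    /* Toy instruction: bump eax, advance eip; 'ok' simulates a fault. */
    static bool emulate_one(struct cpu_user_regs *r, bool ok)
    {
        r->eax += 1;
        r->eip += 2;
        return ok;
    }

    static int emulate_memop_sketch(struct cpu_user_regs *regs, bool ok)
    {
        struct cpu_user_regs _regs = *regs;  /* shadow copy of register state */
        if ( !emulate_one(&_regs, ok) )
            return -1;                       /* fault: *regs never modified */
        *regs = _regs;                       /* committed on successful emulation */
        return 0;
    }

    int main(void)
    {
        struct cpu_user_regs regs = { 0x1000, 0 };
        emulate_memop_sketch(&regs, false);
        printf("after fault:  eip=%lx eax=%lu\n", regs.eip, regs.eax);  /* unchanged */
        emulate_memop_sketch(&regs, true);
        printf("after commit: eip=%lx eax=%lu\n", regs.eip, regs.eax);
        return 0;
    }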
79.1 --- a/xen/common/dom0_ops.c Thu Apr 28 13:54:01 2005 +0000 79.2 +++ b/xen/common/dom0_ops.c Fri Apr 29 07:34:47 2005 +0000 79.3 @@ -21,7 +21,7 @@ 79.4 79.5 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op); 79.6 extern void arch_getdomaininfo_ctxt( 79.7 - struct exec_domain *, full_execution_context_t *); 79.8 + struct exec_domain *, struct vcpu_guest_context *); 79.9 79.10 static inline int is_free_domid(domid_t dom) 79.11 { 79.12 @@ -279,7 +279,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op) 79.13 79.14 case DOM0_GETDOMAININFO: 79.15 { 79.16 - full_execution_context_t *c; 79.17 + struct vcpu_guest_context *c; 79.18 struct domain *d; 79.19 struct exec_domain *ed; 79.20 79.21 @@ -331,7 +331,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op) 79.22 79.23 if ( op->u.getdomaininfo.ctxt != NULL ) 79.24 { 79.25 - if ( (c = xmalloc(full_execution_context_t)) == NULL ) 79.26 + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) 79.27 { 79.28 ret = -ENOMEM; 79.29 put_domain(d);
80.1 --- a/xen/common/domain.c Thu Apr 28 13:54:01 2005 +0000 80.2 +++ b/xen/common/domain.c Fri Apr 29 07:34:47 2005 +0000 80.3 @@ -231,7 +231,7 @@ void domain_destruct(struct domain *d) 80.4 int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo) 80.5 { 80.6 int rc = 0; 80.7 - full_execution_context_t *c = NULL; 80.8 + struct vcpu_guest_context *c = NULL; 80.9 unsigned long vcpu = setdomaininfo->exec_domain; 80.10 struct exec_domain *ed; 80.11 80.12 @@ -242,7 +242,7 @@ int set_info_guest(struct domain *p, dom 80.13 !test_bit(EDF_CTRLPAUSE, &ed->ed_flags)) 80.14 return -EINVAL; 80.15 80.16 - if ( (c = xmalloc(full_execution_context_t)) == NULL ) 80.17 + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) 80.18 return -ENOMEM; 80.19 80.20 if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) ) 80.21 @@ -266,12 +266,12 @@ int set_info_guest(struct domain *p, dom 80.22 * than domain 0. ie. the domains that are being built by the userspace dom0 80.23 * domain builder. 80.24 */ 80.25 -long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) 80.26 +long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) 80.27 { 80.28 struct domain *d = current->domain; 80.29 struct exec_domain *ed; 80.30 int rc = 0; 80.31 - full_execution_context_t *c; 80.32 + struct vcpu_guest_context *c; 80.33 80.34 if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) ) 80.35 return -EINVAL; 80.36 @@ -279,7 +279,7 @@ long do_boot_vcpu(unsigned long vcpu, fu 80.37 if ( alloc_exec_domain_struct(d, vcpu) == NULL ) 80.38 return -ENOMEM; 80.39 80.40 - if ( (c = xmalloc(full_execution_context_t)) == NULL ) 80.41 + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) 80.42 { 80.43 rc = -ENOMEM; 80.44 goto out;
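set_info_guest() and do_boot_vcpu() above heap-allocate the vcpu_guest_context rather than placing it on the (small) hypervisor stack, then copy it in from the guest before use. A sketch of that allocate-copy shape, modelling xmalloc/copy_from_user with libc equivalents:

    #include <stdlib.h>
    #include <string.h>

    struct vcpu_guest_context { unsigned long regs[32]; };   /* abridged */

    static int boot_vcpu_sketch(const struct vcpu_guest_context *user_ctxt)
    {
        struct vcpu_guest_context *c = malloc(sizeof(*c));   /* xmalloc(struct ...) */
        if ( c == NULL )
            return -12;                                      /* -ENOMEM */
        memcpy(c, user_ctxt, sizeof(*c));                    /* copy_from_user(...) */
        /* ... arch code would consume *c here before any vcpu runs ... */
        free(c);                                             /* freed on every exit path */
        return 0;
    }

    int main(void)
    {
        struct vcpu_guest_context ctxt = { { 0 } };
        return boot_vcpu_sketch(&ctxt);
    }

Copying into a private buffer first also means later validation operates on a snapshot the guest can no longer modify.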
81.1 --- a/xen/common/keyhandler.c Thu Apr 28 13:54:01 2005 +0000 81.2 +++ b/xen/common/keyhandler.c Fri Apr 29 07:34:47 2005 +0000 81.3 @@ -36,7 +36,7 @@ static void keypress_softirq(void) 81.4 (*h)(key); 81.5 } 81.6 81.7 -void handle_keypress(unsigned char key, struct xen_regs *regs) 81.8 +void handle_keypress(unsigned char key, struct cpu_user_regs *regs) 81.9 { 81.10 irq_keyhandler_t *h; 81.11 81.12 @@ -83,13 +83,13 @@ static void show_handlers(unsigned char 81.13 key_table[i].desc); 81.14 } 81.15 81.16 -static void dump_registers(unsigned char key, struct xen_regs *regs) 81.17 +static void dump_registers(unsigned char key, struct cpu_user_regs *regs) 81.18 { 81.19 printk("'%c' pressed -> dumping registers\n", key); 81.20 show_registers(regs); 81.21 } 81.22 81.23 -static void halt_machine(unsigned char key, struct xen_regs *regs) 81.24 +static void halt_machine(unsigned char key, struct cpu_user_regs *regs) 81.25 { 81.26 printk("'%c' pressed -> rebooting machine\n", key); 81.27 machine_restart(NULL); 81.28 @@ -125,9 +125,12 @@ static void do_task_queues(unsigned char 81.29 printk("Notifying guest... %d/%d\n", d->id, ed->eid); 81.30 printk("port %d/%d stat %d %d %d\n", 81.31 VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG], 81.32 - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]), 81.33 - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]), 81.34 - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel)); 81.35 + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 81.36 + &d->shared_info->evtchn_pending[0]), 81.37 + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 81.38 + &d->shared_info->evtchn_mask[0]), 81.39 + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, 81.40 + &ed->vcpu_info->evtchn_pending_sel)); 81.41 send_guest_virq(ed, VIRQ_DEBUG); 81.42 } 81.43 } 81.44 @@ -147,7 +150,7 @@ extern void perfc_printall(unsigned char 81.45 extern void perfc_reset(unsigned char key); 81.46 #endif 81.47 81.48 -void do_debug_key(unsigned char key, struct xen_regs *regs) 81.49 +void do_debug_key(unsigned char key, struct cpu_user_regs *regs) 81.50 { 81.51 (void)debugger_trap_fatal(0xf001, regs); 81.52 nop(); /* Prevent the compiler doing tail call
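The keyhandler hunks above all take the interrupt-time register snapshot; the surrounding mechanism is a per-key table of handlers consulted by the console driver. A freestanding sketch, with the table layout simplified from the handle_keypress() excerpt:

    #include <stdio.h>

    struct cpu_user_regs { unsigned long eip; };   /* abridged */

    typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs);

    static irq_keyhandler_t *key_table[256];       /* Xen's table also carries a description */

    static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
    {
        printf("'%c' pressed -> dumping registers (eip=%lx)\n", key, regs->eip);
    }

    static void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
    {
        irq_keyhandler_t *h = key_table[key];
        if ( h != NULL )
            (*h)(key, regs);
    }

    int main(void)
    {
        struct cpu_user_regs regs = { 0xc0100000ul };
        key_table['r'] = dump_registers;
        handle_keypress('r', &regs);
        return 0;
    }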
82.1 --- a/xen/common/schedule.c Thu Apr 28 13:54:01 2005 +0000 82.2 +++ b/xen/common/schedule.c Fri Apr 29 07:34:47 2005 +0000 82.3 @@ -228,7 +228,9 @@ long do_block(void) 82.4 82.5 /* Check for events /after/ blocking: avoids wakeup waiting race. */ 82.6 if ( event_pending(ed) ) 82.7 + { 82.8 clear_bit(EDF_BLOCKED, &ed->ed_flags); 82.9 + } 82.10 else 82.11 { 82.12 TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid); 82.13 @@ -382,7 +384,7 @@ static void __enter_scheduler(void) 82.14 spin_unlock_irq(&schedule_data[cpu].schedule_lock); 82.15 82.16 if ( unlikely(prev == next) ) 82.17 - return; 82.18 + return continue_running(prev); 82.19 82.20 perfc_incrc(sched_ctx); 82.21
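In the do_block() hunk above, the braces are cosmetic; the load-bearing line is the existing comment: the pending-event check happens after the blocked flag is set, closing the wakeup-waiting race (the other hunk returns through continue_running() when the scheduler picks the same vcpu). A small C11 sketch of the ordering, with atomics standing in for Xen's bitops and a print standing in for descheduling:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool blocked, event_pending_flag;

    static bool event_pending(void) { return atomic_load(&event_pending_flag); }

    static void do_block(void)
    {
        atomic_store(&blocked, true);          /* publish intent to sleep first */
        if ( event_pending() )                 /* check for events /after/ blocking */
            atomic_store(&blocked, false);     /* a racing event unblocks us */
        else
            printf("would deschedule here\n"); /* __enter_scheduler() in Xen */
    }

    int main(void)
    {
        atomic_store(&event_pending_flag, true);   /* event arrives "concurrently" */
        do_block();
        printf("blocked=%d\n", (int)atomic_load(&blocked));  /* 0: race avoided */
        return 0;
    }

Checking before setting the flag would leave a window in which an event arrives, sees the vcpu unblocked, and is lost while the vcpu sleeps anyway.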
83.1 --- a/xen/drivers/char/console.c Thu Apr 28 13:54:01 2005 +0000 83.2 +++ b/xen/drivers/char/console.c Fri Apr 29 07:34:47 2005 +0000 83.3 @@ -260,7 +260,7 @@ static void switch_serial_input(void) 83.4 } 83.5 } 83.6 83.7 -static void __serial_rx(unsigned char c, struct xen_regs *regs) 83.8 +static void __serial_rx(unsigned char c, struct cpu_user_regs *regs) 83.9 { 83.10 if ( xen_rx ) 83.11 { 83.12 @@ -274,7 +274,7 @@ static void __serial_rx(unsigned char c, 83.13 } 83.14 } 83.15 83.16 -static void serial_rx(unsigned char c, struct xen_regs *regs) 83.17 +static void serial_rx(unsigned char c, struct cpu_user_regs *regs) 83.18 { 83.19 static int switch_code_count = 0; 83.20
84.1 --- a/xen/drivers/char/serial.c Thu Apr 28 13:54:01 2005 +0000 84.2 +++ b/xen/drivers/char/serial.c Fri Apr 29 07:34:47 2005 +0000 84.3 @@ -105,7 +105,7 @@ static struct uart com[2] = { 84.4 * PRIVATE FUNCTIONS 84.5 */ 84.6 84.7 -static void uart_rx(struct uart *uart, struct xen_regs *regs) 84.8 +static void uart_rx(struct uart *uart, struct cpu_user_regs *regs) 84.9 { 84.10 unsigned char c; 84.11 84.12 @@ -132,7 +132,7 @@ static void uart_rx(struct uart *uart, s 84.13 } 84.14 84.15 static void serial_interrupt( 84.16 - int irq, void *dev_id, struct xen_regs *regs) 84.17 + int irq, void *dev_id, struct cpu_user_regs *regs) 84.18 { 84.19 uart_rx((struct uart *)dev_id, regs); 84.20 }
85.1 --- a/xen/include/asm-ia64/debugger.h Thu Apr 28 13:54:01 2005 +0000 85.2 +++ b/xen/include/asm-ia64/debugger.h Fri Apr 29 07:34:47 2005 +0000 85.3 @@ -26,13 +26,13 @@ 85.4 85.5 /* The main trap handlers use these helper macros which include early bail. */ 85.6 static inline int debugger_trap_entry( 85.7 - unsigned int vector, struct xen_regs *regs) 85.8 + unsigned int vector, struct cpu_user_regs *regs) 85.9 { 85.10 return 0; 85.11 } 85.12 85.13 static inline int debugger_trap_fatal( 85.14 - unsigned int vector, struct xen_regs *regs) 85.15 + unsigned int vector, struct cpu_user_regs *regs) 85.16 { 85.17 return 0; 85.18 }
86.1 --- a/xen/include/asm-ia64/domain.h Thu Apr 28 13:54:01 2005 +0000 86.2 +++ b/xen/include/asm-ia64/domain.h Fri Apr 29 07:34:47 2005 +0000 86.3 @@ -6,7 +6,7 @@ 86.4 extern void arch_do_createdomain(struct exec_domain *); 86.5 86.6 extern int arch_final_setup_guestos( 86.7 - struct exec_domain *, full_execution_context_t *); 86.8 + struct exec_domain *, struct vcpu_guest_context *); 86.9 86.10 extern void domain_relinquish_resources(struct domain *); 86.11
87.1 --- a/xen/include/asm-ia64/regs.h Thu Apr 28 13:54:01 2005 +0000 87.2 +++ b/xen/include/asm-ia64/regs.h Fri Apr 29 07:34:47 2005 +0000 87.3 @@ -1,2 +1,2 @@ 87.4 #include <asm/ptrace.h> 87.5 -#define xen_regs pt_regs 87.6 +#define cpu_user_regs pt_regs
88.1 --- a/xen/include/asm-x86/apic.h Thu Apr 28 13:54:01 2005 +0000 88.2 +++ b/xen/include/asm-x86/apic.h Fri Apr 29 07:34:47 2005 +0000 88.3 @@ -74,10 +74,10 @@ extern void sync_Arb_IDs (void); 88.4 extern void init_bsp_APIC (void); 88.5 extern void setup_local_APIC (void); 88.6 extern void init_apic_mappings (void); 88.7 -extern void smp_local_timer_interrupt (struct xen_regs * regs); 88.8 +extern void smp_local_timer_interrupt (struct cpu_user_regs * regs); 88.9 extern void setup_APIC_clocks (void); 88.10 extern void setup_apic_nmi_watchdog (void); 88.11 -extern void nmi_watchdog_tick (struct xen_regs * regs); 88.12 +extern void nmi_watchdog_tick (struct cpu_user_regs * regs); 88.13 extern void touch_nmi_watchdog(void); 88.14 extern int APIC_init_uniprocessor (void); 88.15 extern void disable_APIC_timer(void);
89.1 --- a/xen/include/asm-x86/config.h Thu Apr 28 13:54:01 2005 +0000 89.2 +++ b/xen/include/asm-x86/config.h Fri Apr 29 07:34:47 2005 +0000 89.3 @@ -64,16 +64,13 @@ 89.4 /* Linkage for x86 */ 89.5 #define __ALIGN .align 16,0x90 89.6 #define __ALIGN_STR ".align 16,0x90" 89.7 -#define SYMBOL_NAME_STR(X) #X 89.8 -#define SYMBOL_NAME(X) X 89.9 -#define SYMBOL_NAME_LABEL(X) X##: 89.10 #ifdef __ASSEMBLY__ 89.11 #define ALIGN __ALIGN 89.12 #define ALIGN_STR __ALIGN_STR 89.13 -#define ENTRY(name) \ 89.14 - .globl SYMBOL_NAME(name); \ 89.15 - ALIGN; \ 89.16 - SYMBOL_NAME_LABEL(name) 89.17 +#define ENTRY(name) \ 89.18 + .globl name; \ 89.19 + ALIGN; \ 89.20 + name: 89.21 #endif 89.22 89.23 #define barrier() __asm__ __volatile__("": : :"memory")
90.1 --- a/xen/include/asm-x86/debugger.h Thu Apr 28 13:54:01 2005 +0000 90.2 +++ b/xen/include/asm-x86/debugger.h Fri Apr 29 07:34:47 2005 +0000 90.3 @@ -38,11 +38,11 @@ 90.4 #define DEBUGGER_trap_fatal(_v, _r) \ 90.5 if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed; 90.6 90.7 -int call_with_registers(int (*f)(struct xen_regs *r)); 90.8 +int call_with_registers(int (*f)(struct cpu_user_regs *r)); 90.9 90.10 #if defined(CRASH_DEBUG) 90.11 90.12 -extern int __trap_to_cdb(struct xen_regs *r); 90.13 +extern int __trap_to_cdb(struct cpu_user_regs *r); 90.14 #define debugger_trap_entry(_v, _r) (0) 90.15 #define debugger_trap_fatal(_v, _r) __trap_to_cdb(_r) 90.16 #define debugger_trap_immediate() call_with_registers(__trap_to_cdb) 90.17 @@ -52,7 +52,7 @@ extern int __trap_to_cdb(struct xen_regs 90.18 #include <xen/softirq.h> 90.19 90.20 static inline int debugger_trap_entry( 90.21 - unsigned int vector, struct xen_regs *regs) 90.22 + unsigned int vector, struct cpu_user_regs *regs) 90.23 { 90.24 struct exec_domain *ed = current; 90.25 90.26 @@ -77,16 +77,16 @@ static inline int debugger_trap_entry( 90.27 90.28 #elif 0 90.29 90.30 -extern int kdb_trap(int, int, struct xen_regs *); 90.31 +extern int kdb_trap(int, int, struct cpu_user_regs *); 90.32 90.33 static inline int debugger_trap_entry( 90.34 - unsigned int vector, struct xen_regs *regs) 90.35 + unsigned int vector, struct cpu_user_regs *regs) 90.36 { 90.37 return 0; 90.38 } 90.39 90.40 static inline int debugger_trap_fatal( 90.41 - unsigned int vector, struct xen_regs *regs) 90.42 + unsigned int vector, struct cpu_user_regs *regs) 90.43 { 90.44 return kdb_trap(vector, 0, regs); 90.45 }
91.1 --- a/xen/include/asm-x86/domain.h Thu Apr 28 13:54:01 2005 +0000 91.2 +++ b/xen/include/asm-x86/domain.h Fri Apr 29 07:34:47 2005 +0000 91.3 @@ -66,38 +66,12 @@ struct arch_domain 91.4 91.5 struct arch_exec_domain 91.6 { 91.7 - unsigned long kernel_sp; 91.8 - unsigned long kernel_ss; 91.9 + struct vcpu_guest_context guest_context; 91.10 91.11 unsigned long flags; /* TF_ */ 91.12 91.13 - /* Hardware debugging registers */ 91.14 - unsigned long debugreg[8]; /* %%db0-7 debug registers */ 91.15 - 91.16 - /* floating point info */ 91.17 - struct i387_state i387; 91.18 - 91.19 - /* general user-visible register state */ 91.20 - execution_context_t user_ctxt; 91.21 - 91.22 void (*schedule_tail) (struct exec_domain *); 91.23 91.24 - /* 91.25 - * Return vectors pushed to us by guest OS. 91.26 - * The stack frame for events is exactly that of an x86 hardware interrupt. 91.27 - * The stack frame for a failsafe callback is augmented with saved values 91.28 - * for segment registers %ds, %es, %fs and %gs: 91.29 - * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss] 91.30 - */ 91.31 - 91.32 - unsigned long event_selector; /* entry CS (x86/32 only) */ 91.33 - unsigned long event_address; /* entry EIP */ 91.34 - 91.35 - unsigned long failsafe_selector; /* entry CS (x86/32 only) */ 91.36 - unsigned long failsafe_address; /* entry EIP */ 91.37 - 91.38 - unsigned long syscall_address; /* entry EIP (x86/64 only) */ 91.39 - 91.40 /* Bounce information for propagating an exception to guest OS. */ 91.41 struct trap_bounce trap_bounce; 91.42 91.43 @@ -108,10 +82,8 @@ struct arch_exec_domain 91.44 91.45 /* Trap info. */ 91.46 #ifdef ARCH_HAS_FAST_TRAP 91.47 - int fast_trap_idx; 91.48 struct desc_struct fast_trap_desc; 91.49 #endif 91.50 - trap_info_t traps[256]; 91.51 91.52 /* Virtual Machine Extensions */ 91.53 struct arch_vmx_struct arch_vmx; 91.54 @@ -143,7 +115,7 @@ struct arch_exec_domain 91.55 unsigned long guest_cr2; 91.56 91.57 /* Current LDT details. */ 91.58 - unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt; 91.59 + unsigned long shadow_ldt_mapcnt; 91.60 /* Next entry is passed to LGDT on domain switch. */ 91.61 char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */ 91.62 } __cacheline_aligned;
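The arch_exec_domain cleanup above folds the scattered per-vcpu fields (callbacks, trap table, LDT, FPU, registers) into the single vcpu_guest_context that the dom0 interface already traffics in, so snapshotting or restoring guest state approaches one struct copy. A reduced sketch, with the field list abridged; the memcpy illustrates the benefit and is not Xen's arch_getdomaininfo_ctxt():

    #include <stdio.h>
    #include <string.h>

    struct vcpu_guest_context {                /* abridged */
        unsigned long kernel_ss, kernel_sp;
        unsigned long event_callback_cs, event_callback_eip;
        unsigned long failsafe_callback_cs, failsafe_callback_eip;
        unsigned long ldt_base, ldt_ents;
    };

    struct exec_domain { struct { struct vcpu_guest_context guest_context; } arch; };

    static void snapshot_ctxt(const struct exec_domain *ed, struct vcpu_guest_context *c)
    {
        memcpy(c, &ed->arch.guest_context, sizeof(*c));  /* one copy, no per-field marshalling */
    }

    int main(void)
    {
        struct exec_domain ed = { { { .kernel_sp = 0xc0000000ul, .ldt_ents = 32 } } };
        struct vcpu_guest_context c;
        snapshot_ctxt(&ed, &c);
        printf("sp=%lx ldt_ents=%lu\n", c.kernel_sp, c.ldt_ents);
        return 0;
    }

This is also why so many hunks in this changeset are mechanical path rewrites of the form arch.X to arch.guest_context.Y.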
92.1 --- a/xen/include/asm-x86/ldt.h Thu Apr 28 13:54:01 2005 +0000 92.2 +++ b/xen/include/asm-x86/ldt.h Fri Apr 29 07:34:47 2005 +0000 92.3 @@ -10,7 +10,7 @@ static inline void load_LDT(struct exec_ 92.4 struct desc_struct *desc; 92.5 unsigned long ents; 92.6 92.7 - if ( (ents = ed->arch.ldt_ents) == 0 ) 92.8 + if ( (ents = ed->arch.guest_context.ldt_ents) == 0 ) 92.9 { 92.10 __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) ); 92.11 }
93.1 --- a/xen/include/asm-x86/processor.h Thu Apr 28 13:54:01 2005 +0000 93.2 +++ b/xen/include/asm-x86/processor.h Fri Apr 29 07:34:47 2005 +0000 93.3 @@ -110,7 +110,7 @@ 93.4 #define TRAP_deferred_nmi 31 93.5 93.6 /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */ 93.7 -/* NB. Same as ECF_IN_SYSCALL. No bits in common with any other TRAP_* defn. */ 93.8 +/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_ defn. */ 93.9 #define TRAP_syscall 256 93.10 93.11 /* 93.12 @@ -191,7 +191,9 @@ extern void dodgy_tsc(void); 93.13 /* 93.14 * Generic CPUID function 93.15 */ 93.16 -static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) 93.17 +static inline void cpuid( 93.18 + int op, unsigned int *eax, unsigned int *ebx, 93.19 + unsigned int *ecx, unsigned int *edx) 93.20 { 93.21 __asm__("cpuid" 93.22 : "=a" (*eax), 93.23 @@ -330,10 +332,6 @@ static inline void clear_in_cr4 (unsigne 93.24 #define IOBMP_BYTES 8192 93.25 #define IOBMP_INVALID_OFFSET 0x8000 93.26 93.27 -struct i387_state { 93.28 - u8 state[512]; /* big enough for FXSAVE */ 93.29 -} __attribute__ ((aligned (16))); 93.30 - 93.31 struct tss_struct { 93.32 unsigned short back_link,__blh; 93.33 #ifdef __x86_64__ 93.34 @@ -382,16 +380,18 @@ extern struct tss_struct init_tss[NR_CPU 93.35 #ifdef ARCH_HAS_FAST_TRAP 93.36 93.37 #define SET_DEFAULT_FAST_TRAP(_p) \ 93.38 - (_p)->fast_trap_idx = 0x20; \ 93.39 + (_p)->guest_context.fast_trap_idx = 0x20; \ 93.40 (_p)->fast_trap_desc.a = 0; \ 93.41 (_p)->fast_trap_desc.b = 0; 93.42 93.43 #define CLEAR_FAST_TRAP(_p) \ 93.44 - (memset(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \ 93.45 - 0, 8)) 93.46 + (memset(idt_tables[smp_processor_id()] + \ 93.47 + (_p)->guest_context.fast_trap_idx, \ 93.48 + 0, 8)) 93.49 93.50 #define SET_FAST_TRAP(_p) \ 93.51 - (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \ 93.52 + (memcpy(idt_tables[smp_processor_id()] + \ 93.53 + (_p)->guest_context.fast_trap_idx, \ 93.54 &((_p)->fast_trap_desc), 8)) 93.55 93.56 long set_fast_trap(struct exec_domain *p, int idx); 93.57 @@ -405,7 +405,7 @@ long set_fast_trap(struct exec_domain *p 93.58 93.59 #endif 93.60 93.61 -extern int gpf_emulate_4gb(struct xen_regs *regs); 93.62 +extern int gpf_emulate_4gb(struct cpu_user_regs *regs); 93.63 93.64 extern void write_ptbase(struct exec_domain *ed); 93.65 93.66 @@ -499,9 +499,9 @@ extern inline void prefetchw(const void 93.67 void show_guest_stack(); 93.68 void show_trace(unsigned long *esp); 93.69 void show_stack(unsigned long *esp); 93.70 -void show_registers(struct xen_regs *regs); 93.71 +void show_registers(struct cpu_user_regs *regs); 93.72 void show_page_walk(unsigned long addr); 93.73 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs); 93.74 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs); 93.75 93.76 #endif /* !__ASSEMBLY__ */ 93.77
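SET_FAST_TRAP/CLEAR_FAST_TRAP above now pull the slot index from guest_context; mechanically they copy a prebuilt 8-byte gate descriptor into, or zero it out of, the current CPU's IDT on context switch. A freestanding sketch with the gate encoding taken from set_fast_trap() in x86_32/traps.c; the selector and address values are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct desc_struct { uint32_t a, b; };

    static struct desc_struct idt[256];        /* stands in for idt_tables[cpu] */

    /* Trap gate, present, DPL from the guest's trap table entry. */
    static struct desc_struct make_fast_trap_desc(uint16_t cs, uint32_t addr,
                                                  unsigned int dpl)
    {
        struct desc_struct d;
        d.a = ((uint32_t)cs << 16) | (addr & 0xffffu);
        d.b = (addr & 0xffff0000u) | 0x8f00u | ((dpl & 3u) << 13);
        return d;
    }

    int main(void)
    {
        unsigned int idx = 0x80;               /* int $0x80: a slot set_fast_trap() permits */
        struct desc_struct fast = make_fast_trap_desc(0x0819, 0x00c01234u, 1);

        memcpy(&idt[idx], &fast, 8);           /* SET_FAST_TRAP   */
        printf("set:   %08x %08x\n", idt[idx].a, idt[idx].b);
        memset(&idt[idx], 0, 8);               /* CLEAR_FAST_TRAP */
        printf("clear: %08x %08x\n", idt[idx].a, idt[idx].b);
        return 0;
    }

Keeping the prebuilt descriptor in the per-vcpu state makes the switch-time work a fixed-size copy rather than a re-encode.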
94.1 --- a/xen/include/asm-x86/shadow.h Thu Apr 28 13:54:01 2005 +0000 94.2 +++ b/xen/include/asm-x86/shadow.h Fri Apr 29 07:34:47 2005 +0000 94.3 @@ -63,7 +63,7 @@ 94.4 94.5 extern void shadow_mode_init(void); 94.6 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc); 94.7 -extern int shadow_fault(unsigned long va, struct xen_regs *regs); 94.8 +extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs); 94.9 extern int shadow_mode_enable(struct domain *p, unsigned int mode); 94.10 extern void shadow_invlpg(struct exec_domain *, unsigned long); 94.11 extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
95.1 --- a/xen/include/asm-x86/vmx.h Thu Apr 28 13:54:01 2005 +0000 95.2 +++ b/xen/include/asm-x86/vmx.h Fri Apr 29 07:34:47 2005 +0000 95.3 @@ -25,7 +25,7 @@ 95.4 #include <asm/processor.h> 95.5 #include <asm/vmx_vmcs.h> 95.6 95.7 -extern void vmx_asm_vmexit_handler(struct xen_regs); 95.8 +extern void vmx_asm_vmexit_handler(struct cpu_user_regs); 95.9 extern void vmx_asm_do_resume(void); 95.10 extern void vmx_asm_do_launch(void); 95.11 extern void vmx_intr_assist(struct exec_domain *d);
96.1 --- a/xen/include/asm-x86/vmx_platform.h Thu Apr 28 13:54:01 2005 +0000 96.2 +++ b/xen/include/asm-x86/vmx_platform.h Fri Apr 29 07:34:47 2005 +0000 96.3 @@ -73,7 +73,7 @@ struct instruction { 96.4 struct mi_per_cpu_info 96.5 { 96.6 unsigned long mmio_target; 96.7 - struct xen_regs *inst_decoder_regs; 96.8 + struct cpu_user_regs *inst_decoder_regs; 96.9 }; 96.10 96.11 struct virutal_platform_def { 96.12 @@ -85,7 +85,7 @@ struct virutal_platform_def { 96.13 }; 96.14 96.15 extern void handle_mmio(unsigned long, unsigned long); 96.16 -extern int vmx_setup_platform(struct exec_domain *, execution_context_t *); 96.17 +extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *); 96.18 96.19 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame. 96.20 #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
97.1 --- a/xen/include/asm-x86/vmx_vmcs.h Thu Apr 28 13:54:01 2005 +0000 97.2 +++ b/xen/include/asm-x86/vmx_vmcs.h Fri Apr 29 07:34:47 2005 +0000 97.3 @@ -65,8 +65,8 @@ void free_vmcs(struct vmcs_struct *); 97.4 int load_vmcs(struct arch_vmx_struct *, u64); 97.5 int store_vmcs(struct arch_vmx_struct *, u64); 97.6 void dump_vmcs(void); 97.7 -int construct_vmcs(struct arch_vmx_struct *, execution_context_t *, 97.8 - full_execution_context_t *, int); 97.9 +int construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *, 97.10 + struct vcpu_guest_context *, int); 97.11 97.12 #define VMCS_USE_HOST_ENV 1 97.13 #define VMCS_USE_SEPARATE_ENV 0
98.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h Thu Apr 28 13:54:01 2005 +0000 98.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h Fri Apr 29 07:34:47 2005 +0000 98.3 @@ -4,81 +4,81 @@ 98.4 /* Maybe auto-generate the following two cases (quoted vs. unquoted). */ 98.5 #ifndef __ASSEMBLY__ 98.6 98.7 -#define __SAVE_ALL_PRE \ 98.8 - "cld;" \ 98.9 - "pushl %eax;" \ 98.10 - "pushl %ebp;" \ 98.11 - "pushl %edi;" \ 98.12 - "pushl %esi;" \ 98.13 - "pushl %edx;" \ 98.14 - "pushl %ecx;" \ 98.15 - "pushl %ebx;" \ 98.16 - "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \ 98.17 - "jz 2f;" \ 98.18 - "call setup_vm86_frame;" \ 98.19 - "jmp 3f;" \ 98.20 - "2:testb $3,"STR(XREGS_cs)"(%esp);" \ 98.21 - "jz 1f;" \ 98.22 - "movl %ds,"STR(XREGS_ds)"(%esp);" \ 98.23 - "movl %es,"STR(XREGS_es)"(%esp);" \ 98.24 - "movl %fs,"STR(XREGS_fs)"(%esp);" \ 98.25 - "movl %gs,"STR(XREGS_gs)"(%esp);" \ 98.26 +#define __SAVE_ALL_PRE \ 98.27 + "cld;" \ 98.28 + "pushl %eax;" \ 98.29 + "pushl %ebp;" \ 98.30 + "pushl %edi;" \ 98.31 + "pushl %esi;" \ 98.32 + "pushl %edx;" \ 98.33 + "pushl %ecx;" \ 98.34 + "pushl %ebx;" \ 98.35 + "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \ 98.36 + "jz 2f;" \ 98.37 + "call setup_vm86_frame;" \ 98.38 + "jmp 3f;" \ 98.39 + "2:testb $3,"STR(UREGS_cs)"(%esp);" \ 98.40 + "jz 1f;" \ 98.41 + "movl %ds,"STR(UREGS_ds)"(%esp);" \ 98.42 + "movl %es,"STR(UREGS_es)"(%esp);" \ 98.43 + "movl %fs,"STR(UREGS_fs)"(%esp);" \ 98.44 + "movl %gs,"STR(UREGS_gs)"(%esp);" \ 98.45 "3:" 98.46 98.47 -#define SAVE_ALL_NOSEGREGS(_reg) \ 98.48 - __SAVE_ALL_PRE \ 98.49 +#define SAVE_ALL_NOSEGREGS(_reg) \ 98.50 + __SAVE_ALL_PRE \ 98.51 "1:" 98.52 98.53 -#define SET_XEN_SEGMENTS(_reg) \ 98.54 - "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \ 98.55 - "movl %e"STR(_reg)"x,%ds;" \ 98.56 +#define SET_XEN_SEGMENTS(_reg) \ 98.57 + "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \ 98.58 + "movl %e"STR(_reg)"x,%ds;" \ 98.59 "movl %e"STR(_reg)"x,%es;" 98.60 98.61 -#define SAVE_ALL(_reg) \ 98.62 - __SAVE_ALL_PRE \ 98.63 - SET_XEN_SEGMENTS(_reg) \ 98.64 +#define SAVE_ALL(_reg) \ 98.65 + __SAVE_ALL_PRE \ 98.66 + SET_XEN_SEGMENTS(_reg) \ 98.67 "1:" 98.68 98.69 #else 98.70 98.71 -#define __SAVE_ALL_PRE \ 98.72 - cld; \ 98.73 - pushl %eax; \ 98.74 - pushl %ebp; \ 98.75 - pushl %edi; \ 98.76 - pushl %esi; \ 98.77 - pushl %edx; \ 98.78 - pushl %ecx; \ 98.79 - pushl %ebx; \ 98.80 - testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \ 98.81 - jz 2f; \ 98.82 - call setup_vm86_frame; \ 98.83 - jmp 3f; \ 98.84 - 2:testb $3,XREGS_cs(%esp); \ 98.85 - jz 1f; \ 98.86 - movl %ds,XREGS_ds(%esp); \ 98.87 - movl %es,XREGS_es(%esp); \ 98.88 - movl %fs,XREGS_fs(%esp); \ 98.89 - movl %gs,XREGS_gs(%esp); \ 98.90 +#define __SAVE_ALL_PRE \ 98.91 + cld; \ 98.92 + pushl %eax; \ 98.93 + pushl %ebp; \ 98.94 + pushl %edi; \ 98.95 + pushl %esi; \ 98.96 + pushl %edx; \ 98.97 + pushl %ecx; \ 98.98 + pushl %ebx; \ 98.99 + testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \ 98.100 + jz 2f; \ 98.101 + call setup_vm86_frame; \ 98.102 + jmp 3f; \ 98.103 + 2:testb $3,UREGS_cs(%esp); \ 98.104 + jz 1f; \ 98.105 + movl %ds,UREGS_ds(%esp); \ 98.106 + movl %es,UREGS_es(%esp); \ 98.107 + movl %fs,UREGS_fs(%esp); \ 98.108 + movl %gs,UREGS_gs(%esp); \ 98.109 3: 98.110 98.111 -#define SAVE_ALL_NOSEGREGS(_reg) \ 98.112 - __SAVE_ALL_PRE \ 98.113 +#define SAVE_ALL_NOSEGREGS(_reg) \ 98.114 + __SAVE_ALL_PRE \ 98.115 1: 98.116 98.117 -#define SET_XEN_SEGMENTS(_reg) \ 98.118 - movl $(__HYPERVISOR_DS),%e ## _reg ## x; \ 98.119 - movl %e ## _reg ## x,%ds; \ 98.120 +#define 
SET_XEN_SEGMENTS(_reg) \ 98.121 + movl $(__HYPERVISOR_DS),%e ## _reg ## x; \ 98.122 + movl %e ## _reg ## x,%ds; \ 98.123 movl %e ## _reg ## x,%es; 98.124 98.125 -#define SAVE_ALL(_reg) \ 98.126 - __SAVE_ALL_PRE \ 98.127 - SET_XEN_SEGMENTS(_reg) \ 98.128 +#define SAVE_ALL(_reg) \ 98.129 + __SAVE_ALL_PRE \ 98.130 + SET_XEN_SEGMENTS(_reg) \ 98.131 1: 98.132 98.133 #ifdef PERF_COUNTERS 98.134 -#define PERFC_INCR(_name,_idx) \ 98.135 - lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4) 98.136 +#define PERFC_INCR(_name,_idx) \ 98.137 + lock incl perfcounters+_name(,_idx,4) 98.138 #else 98.139 #define PERFC_INCR(_name,_idx) 98.140 #endif 98.141 @@ -86,50 +86,50 @@ 98.142 #endif 98.143 98.144 #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v) 98.145 -#define XBUILD_SMP_INTERRUPT(x,v)\ 98.146 -asmlinkage void x(void); \ 98.147 -__asm__( \ 98.148 - "\n"__ALIGN_STR"\n" \ 98.149 - SYMBOL_NAME_STR(x) ":\n\t" \ 98.150 - "pushl $"#v"<<16\n\t" \ 98.151 - SAVE_ALL(a) \ 98.152 - "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \ 98.153 +#define XBUILD_SMP_INTERRUPT(x,v) \ 98.154 +asmlinkage void x(void); \ 98.155 +__asm__( \ 98.156 + "\n"__ALIGN_STR"\n" \ 98.157 + STR(x) ":\n\t" \ 98.158 + "pushl $"#v"<<16\n\t" \ 98.159 + SAVE_ALL(a) \ 98.160 + "call "STR(smp_##x)"\n\t" \ 98.161 "jmp ret_from_intr\n"); 98.162 98.163 #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v) 98.164 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ 98.165 -asmlinkage void x(struct xen_regs * regs); \ 98.166 -__asm__( \ 98.167 -"\n"__ALIGN_STR"\n" \ 98.168 -SYMBOL_NAME_STR(x) ":\n\t" \ 98.169 - "pushl $"#v"<<16\n\t" \ 98.170 - SAVE_ALL(a) \ 98.171 - "movl %esp,%eax\n\t" \ 98.172 - "pushl %eax\n\t" \ 98.173 - "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \ 98.174 - "addl $4,%esp\n\t" \ 98.175 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ 98.176 +asmlinkage void x(struct cpu_user_regs * regs); \ 98.177 +__asm__( \ 98.178 +"\n"__ALIGN_STR"\n" \ 98.179 +STR(x) ":\n\t" \ 98.180 + "pushl $"#v"<<16\n\t" \ 98.181 + SAVE_ALL(a) \ 98.182 + "movl %esp,%eax\n\t" \ 98.183 + "pushl %eax\n\t" \ 98.184 + "call "STR(smp_##x)"\n\t" \ 98.185 + "addl $4,%esp\n\t" \ 98.186 "jmp ret_from_intr\n"); 98.187 98.188 -#define BUILD_COMMON_IRQ() \ 98.189 -__asm__( \ 98.190 - "\n" __ALIGN_STR"\n" \ 98.191 - "common_interrupt:\n\t" \ 98.192 - SAVE_ALL(a) \ 98.193 - "movl %esp,%eax\n\t" \ 98.194 - "pushl %eax\n\t" \ 98.195 - "call " SYMBOL_NAME_STR(do_IRQ) "\n\t" \ 98.196 - "addl $4,%esp\n\t" \ 98.197 +#define BUILD_COMMON_IRQ() \ 98.198 +__asm__( \ 98.199 + "\n" __ALIGN_STR"\n" \ 98.200 + "common_interrupt:\n\t" \ 98.201 + SAVE_ALL(a) \ 98.202 + "movl %esp,%eax\n\t" \ 98.203 + "pushl %eax\n\t" \ 98.204 + "call " STR(do_IRQ) "\n\t" \ 98.205 + "addl $4,%esp\n\t" \ 98.206 "jmp ret_from_intr\n"); 98.207 98.208 #define IRQ_NAME2(nr) nr##_interrupt(void) 98.209 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) 98.210 98.211 -#define BUILD_IRQ(nr) \ 98.212 -asmlinkage void IRQ_NAME(nr); \ 98.213 -__asm__( \ 98.214 -"\n"__ALIGN_STR"\n" \ 98.215 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ 98.216 - "pushl $"#nr"<<16\n\t" \ 98.217 +#define BUILD_IRQ(nr) \ 98.218 +asmlinkage void IRQ_NAME(nr); \ 98.219 +__asm__( \ 98.220 +"\n"__ALIGN_STR"\n" \ 98.221 +STR(IRQ) #nr "_interrupt:\n\t" \ 98.222 + "pushl $"#nr"<<16\n\t" \ 98.223 "jmp common_interrupt"); 98.224 98.225 #endif /* __X86_32_ASM_DEFNS_H__ */
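[Note] Two mechanical renames run through the x86_32 assembler macros above: the stack-frame offsets XREGS_* become UREGS_* to track the xen_regs -> cpu_user_regs rename, and the legacy SYMBOL_NAME_STR() wrappers collapse to plain STR() now that symbol names carry no leading underscore. The UREGS_* constants are presumably derived from the structure layout in the usual asm-offsets fashion, roughly:

    #include <stddef.h>   /* offsetof */
    /* Sketch: assumes struct cpu_user_regs (public/arch-x86_32.h below)
     * is in scope; the real constants come from the build's generated
     * offsets file, not from literal #defines like these. */
    #define UREGS_eflags  offsetof(struct cpu_user_regs, eflags)
    #define UREGS_cs      offsetof(struct cpu_user_regs, cs)
    #define UREGS_ds      offsetof(struct cpu_user_regs, ds)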
99.1 --- a/xen/include/asm-x86/x86_32/current.h Thu Apr 28 13:54:01 2005 +0000 99.2 +++ b/xen/include/asm-x86/x86_32/current.h Fri Apr 29 07:34:47 2005 +0000 99.3 @@ -5,7 +5,7 @@ 99.4 struct domain; 99.5 99.6 #define STACK_RESERVED \ 99.7 - (sizeof(execution_context_t) + sizeof(struct domain *)) 99.8 + (sizeof(struct cpu_user_regs) + sizeof(struct domain *)) 99.9 99.10 static inline struct exec_domain *get_current(void) 99.11 { 99.12 @@ -23,13 +23,13 @@ static inline void set_current(struct ex 99.13 : : "r" (STACK_SIZE-4), "r" (ed) ); 99.14 } 99.15 99.16 -static inline execution_context_t *get_execution_context(void) 99.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void) 99.18 { 99.19 - execution_context_t *execution_context; 99.20 + struct cpu_user_regs *cpu_user_regs; 99.21 __asm__ ( "andl %%esp,%0; addl %2,%0" 99.22 - : "=r" (execution_context) 99.23 + : "=r" (cpu_user_regs) 99.24 : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); 99.25 - return execution_context; 99.26 + return cpu_user_regs; 99.27 } 99.28 99.29 /* 99.30 @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bo 99.31 #define reset_stack_and_jump(__fn) \ 99.32 __asm__ __volatile__ ( \ 99.33 "movl %0,%%esp; jmp "STR(__fn) \ 99.34 - : : "r" (get_execution_context()) ) 99.35 + : : "r" (get_cpu_user_regs()) ) 99.36 99.37 #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed) 99.38
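[Note] get_execution_context() becomes get_cpu_user_regs() with the arithmetic unchanged: mask %esp down to the base of the per-CPU stack, then step up by STACK_SIZE - STACK_RESERVED, leaving the register frame at the very top of the stack just below the reserved domain pointer. The same computation in plain C (a sketch; STACK_SIZE and STACK_RESERVED as defined above):

    static inline struct cpu_user_regs *regs_from_esp(unsigned long esp)
    {
        unsigned long base = esp & ~(STACK_SIZE - 1);   /* stack base */
        return (struct cpu_user_regs *)(base + STACK_SIZE - STACK_RESERVED);
    }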
100.1 --- a/xen/include/asm-x86/x86_32/regs.h Thu Apr 28 13:54:01 2005 +0000 100.2 +++ b/xen/include/asm-x86/x86_32/regs.h Fri Apr 29 07:34:47 2005 +0000 100.3 @@ -16,6 +16,6 @@ 100.4 ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3))) 100.5 100.6 /* Number of bytes of on-stack execution state to be context-switched. */ 100.7 -#define CTXT_SWITCH_STACK_BYTES (sizeof(execution_context_t)) 100.8 +#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs)) 100.9 100.10 #endif
101.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h Thu Apr 28 13:54:01 2005 +0000 101.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h Fri Apr 29 07:34:47 2005 +0000 101.3 @@ -4,87 +4,87 @@ 101.4 /* Maybe auto-generate the following two cases (quoted vs. unquoted). */ 101.5 #ifndef __ASSEMBLY__ 101.6 101.7 -#define SAVE_ALL \ 101.8 - "cld;" \ 101.9 - "pushq %rdi;" \ 101.10 - "pushq %rsi;" \ 101.11 - "pushq %rdx;" \ 101.12 - "pushq %rcx;" \ 101.13 - "pushq %rax;" \ 101.14 - "pushq %r8;" \ 101.15 - "pushq %r9;" \ 101.16 - "pushq %r10;" \ 101.17 - "pushq %r11;" \ 101.18 - "pushq %rbx;" \ 101.19 - "pushq %rbp;" \ 101.20 - "pushq %r12;" \ 101.21 - "pushq %r13;" \ 101.22 - "pushq %r14;" \ 101.23 +#define SAVE_ALL \ 101.24 + "cld;" \ 101.25 + "pushq %rdi;" \ 101.26 + "pushq %rsi;" \ 101.27 + "pushq %rdx;" \ 101.28 + "pushq %rcx;" \ 101.29 + "pushq %rax;" \ 101.30 + "pushq %r8;" \ 101.31 + "pushq %r9;" \ 101.32 + "pushq %r10;" \ 101.33 + "pushq %r11;" \ 101.34 + "pushq %rbx;" \ 101.35 + "pushq %rbp;" \ 101.36 + "pushq %r12;" \ 101.37 + "pushq %r13;" \ 101.38 + "pushq %r14;" \ 101.39 "pushq %r15;" 101.40 101.41 -#define RESTORE_ALL \ 101.42 - "popq %r15;" \ 101.43 - "popq %r14;" \ 101.44 - "popq %r13;" \ 101.45 - "popq %r12;" \ 101.46 - "popq %rbp;" \ 101.47 - "popq %rbx;" \ 101.48 - "popq %r11;" \ 101.49 - "popq %r10;" \ 101.50 - "popq %r9;" \ 101.51 - "popq %r8;" \ 101.52 - "popq %rax;" \ 101.53 - "popq %rcx;" \ 101.54 - "popq %rdx;" \ 101.55 - "popq %rsi;" \ 101.56 +#define RESTORE_ALL \ 101.57 + "popq %r15;" \ 101.58 + "popq %r14;" \ 101.59 + "popq %r13;" \ 101.60 + "popq %r12;" \ 101.61 + "popq %rbp;" \ 101.62 + "popq %rbx;" \ 101.63 + "popq %r11;" \ 101.64 + "popq %r10;" \ 101.65 + "popq %r9;" \ 101.66 + "popq %r8;" \ 101.67 + "popq %rax;" \ 101.68 + "popq %rcx;" \ 101.69 + "popq %rdx;" \ 101.70 + "popq %rsi;" \ 101.71 "popq %rdi;" 101.72 101.73 /* Work around AMD erratum #88 */ 101.74 -#define safe_swapgs \ 101.75 +#define safe_swapgs \ 101.76 "mfence; swapgs;" 101.77 101.78 #else 101.79 101.80 -#define SAVE_ALL \ 101.81 - cld; \ 101.82 - pushq %rdi; \ 101.83 - pushq %rsi; \ 101.84 - pushq %rdx; \ 101.85 - pushq %rcx; \ 101.86 - pushq %rax; \ 101.87 - pushq %r8; \ 101.88 - pushq %r9; \ 101.89 - pushq %r10; \ 101.90 - pushq %r11; \ 101.91 - pushq %rbx; \ 101.92 - pushq %rbp; \ 101.93 - pushq %r12; \ 101.94 - pushq %r13; \ 101.95 - pushq %r14; \ 101.96 +#define SAVE_ALL \ 101.97 + cld; \ 101.98 + pushq %rdi; \ 101.99 + pushq %rsi; \ 101.100 + pushq %rdx; \ 101.101 + pushq %rcx; \ 101.102 + pushq %rax; \ 101.103 + pushq %r8; \ 101.104 + pushq %r9; \ 101.105 + pushq %r10; \ 101.106 + pushq %r11; \ 101.107 + pushq %rbx; \ 101.108 + pushq %rbp; \ 101.109 + pushq %r12; \ 101.110 + pushq %r13; \ 101.111 + pushq %r14; \ 101.112 pushq %r15; 101.113 101.114 -#define RESTORE_ALL \ 101.115 - popq %r15; \ 101.116 - popq %r14; \ 101.117 - popq %r13; \ 101.118 - popq %r12; \ 101.119 - popq %rbp; \ 101.120 - popq %rbx; \ 101.121 - popq %r11; \ 101.122 - popq %r10; \ 101.123 - popq %r9; \ 101.124 - popq %r8; \ 101.125 - popq %rax; \ 101.126 - popq %rcx; \ 101.127 - popq %rdx; \ 101.128 - popq %rsi; \ 101.129 +#define RESTORE_ALL \ 101.130 + popq %r15; \ 101.131 + popq %r14; \ 101.132 + popq %r13; \ 101.133 + popq %r12; \ 101.134 + popq %rbp; \ 101.135 + popq %rbx; \ 101.136 + popq %r11; \ 101.137 + popq %r10; \ 101.138 + popq %r9; \ 101.139 + popq %r8; \ 101.140 + popq %rax; \ 101.141 + popq %rcx; \ 101.142 + popq %rdx; \ 101.143 + popq %rsi; \ 101.144 popq %rdi; 101.145 101.146 #ifdef PERF_COUNTERS 101.147 
-#define PERFC_INCR(_name,_idx) \ 101.148 - pushq %rdx; \ 101.149 - leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \ 101.150 - lock incl (%rdx,_idx,4); \ 101.151 +#define PERFC_INCR(_name,_idx) \ 101.152 + pushq %rdx; \ 101.153 + leaq perfcounters+_name(%rip),%rdx; \ 101.154 + lock incl (%rdx,_idx,4); \ 101.155 popq %rdx; 101.156 #else 101.157 #define PERFC_INCR(_name,_idx) 101.158 @@ -93,49 +93,49 @@ 101.159 #endif 101.160 101.161 #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v) 101.162 -#define XBUILD_SMP_INTERRUPT(x,v)\ 101.163 -asmlinkage void x(void); \ 101.164 -__asm__( \ 101.165 - "\n"__ALIGN_STR"\n" \ 101.166 - SYMBOL_NAME_STR(x) ":\n\t" \ 101.167 - "pushq $0\n\t" \ 101.168 - "movl $"#v",4(%rsp)\n\t" \ 101.169 - SAVE_ALL \ 101.170 - "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \ 101.171 +#define XBUILD_SMP_INTERRUPT(x,v) \ 101.172 +asmlinkage void x(void); \ 101.173 +__asm__( \ 101.174 + "\n"__ALIGN_STR"\n" \ 101.175 + STR(x) ":\n\t" \ 101.176 + "pushq $0\n\t" \ 101.177 + "movl $"#v",4(%rsp)\n\t" \ 101.178 + SAVE_ALL \ 101.179 + "callq "STR(smp_##x)"\n\t" \ 101.180 "jmp ret_from_intr\n"); 101.181 101.182 #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v) 101.183 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ 101.184 -asmlinkage void x(struct xen_regs * regs); \ 101.185 -__asm__( \ 101.186 -"\n"__ALIGN_STR"\n" \ 101.187 -SYMBOL_NAME_STR(x) ":\n\t" \ 101.188 - "pushq $0\n\t" \ 101.189 - "movl $"#v",4(%rsp)\n\t" \ 101.190 - SAVE_ALL \ 101.191 - "movq %rsp,%rdi\n\t" \ 101.192 - "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \ 101.193 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ 101.194 +asmlinkage void x(struct cpu_user_regs * regs); \ 101.195 +__asm__( \ 101.196 +"\n"__ALIGN_STR"\n" \ 101.197 +STR(x) ":\n\t" \ 101.198 + "pushq $0\n\t" \ 101.199 + "movl $"#v",4(%rsp)\n\t" \ 101.200 + SAVE_ALL \ 101.201 + "movq %rsp,%rdi\n\t" \ 101.202 + "callq "STR(smp_##x)"\n\t" \ 101.203 "jmp ret_from_intr\n"); 101.204 101.205 -#define BUILD_COMMON_IRQ() \ 101.206 -__asm__( \ 101.207 - "\n" __ALIGN_STR"\n" \ 101.208 - "common_interrupt:\n\t" \ 101.209 - SAVE_ALL \ 101.210 - "movq %rsp,%rdi\n\t" \ 101.211 - "callq " SYMBOL_NAME_STR(do_IRQ) "\n\t" \ 101.212 +#define BUILD_COMMON_IRQ() \ 101.213 +__asm__( \ 101.214 + "\n" __ALIGN_STR"\n" \ 101.215 + "common_interrupt:\n\t" \ 101.216 + SAVE_ALL \ 101.217 + "movq %rsp,%rdi\n\t" \ 101.218 + "callq " STR(do_IRQ) "\n\t" \ 101.219 "jmp ret_from_intr\n"); 101.220 101.221 #define IRQ_NAME2(nr) nr##_interrupt(void) 101.222 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) 101.223 101.224 -#define BUILD_IRQ(nr) \ 101.225 -asmlinkage void IRQ_NAME(nr); \ 101.226 -__asm__( \ 101.227 -"\n"__ALIGN_STR"\n" \ 101.228 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ 101.229 - "pushq $0\n\t" \ 101.230 - "movl $"#nr",4(%rsp)\n\t" \ 101.231 +#define BUILD_IRQ(nr) \ 101.232 +asmlinkage void IRQ_NAME(nr); \ 101.233 +__asm__( \ 101.234 +"\n"__ALIGN_STR"\n" \ 101.235 +STR(IRQ) #nr "_interrupt:\n\t" \ 101.236 + "pushq $0\n\t" \ 101.237 + "movl $"#nr",4(%rsp)\n\t" \ 101.238 "jmp common_interrupt"); 101.239 101.240 #endif /* __X86_64_ASM_DEFNS_H__ */
102.1 --- a/xen/include/asm-x86/x86_64/current.h Thu Apr 28 13:54:01 2005 +0000 102.2 +++ b/xen/include/asm-x86/x86_64/current.h Fri Apr 29 07:34:47 2005 +0000 102.3 @@ -5,7 +5,7 @@ 102.4 struct domain; 102.5 102.6 #define STACK_RESERVED \ 102.7 - (sizeof(execution_context_t) + sizeof(struct domain *)) 102.8 + (sizeof(struct cpu_user_regs) + sizeof(struct domain *)) 102.9 102.10 static inline struct exec_domain *get_current(void) 102.11 { 102.12 @@ -23,33 +23,33 @@ static inline void set_current(struct ex 102.13 : : "r" (STACK_SIZE-8), "r" (ed) ); 102.14 } 102.15 102.16 -static inline execution_context_t *get_execution_context(void) 102.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void) 102.18 { 102.19 - execution_context_t *execution_context; 102.20 + struct cpu_user_regs *cpu_user_regs; 102.21 __asm__( "andq %%rsp,%0; addq %2,%0" 102.22 - : "=r" (execution_context) 102.23 + : "=r" (cpu_user_regs) 102.24 : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); 102.25 - return execution_context; 102.26 + return cpu_user_regs; 102.27 } 102.28 102.29 /* 102.30 * Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually 102.31 - * 64 bytes before the real bottom of the stack to allow space for: 102.32 - * domain pointer, DS, ES, FS, GS, FS_BASE, GS_BASE_OS, GS_BASE_APP 102.33 + * 40 bytes before the real bottom of the stack to allow space for: 102.34 + * domain pointer, DS, ES, FS, GS 102.35 */ 102.36 static inline unsigned long get_stack_bottom(void) 102.37 { 102.38 unsigned long p; 102.39 __asm__( "andq %%rsp,%0; addq %2,%0" 102.40 : "=r" (p) 102.41 - : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-64) ); 102.42 + : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-40) ); 102.43 return p; 102.44 } 102.45 102.46 #define reset_stack_and_jump(__fn) \ 102.47 __asm__ __volatile__ ( \ 102.48 "movq %0,%%rsp; jmp "STR(__fn) \ 102.49 - : : "r" (get_execution_context()) ) 102.50 + : : "r" (get_cpu_user_regs()) ) 102.51 102.52 #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed) 102.53
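[Note] The x86_64 stack-bottom offset shrinks from 64 to 40 bytes because FS_BASE, GS_BASE_OS and GS_BASE_APP are no longer stashed there; what remains is five 8-byte slots (domain pointer, DS, ES, FS, GS), hence 5 * 8 = 40. A one-line check of that arithmetic (C11 spelling, x86_64 sizes assumed):

    /* domain ptr, DS, ES, FS, GS: five slots below the true stack bottom. */
    _Static_assert(5 * sizeof(unsigned long) == 40,
                   "matches the STACK_SIZE-40 offset in get_stack_bottom()");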
103.1 --- a/xen/include/asm-x86/x86_64/regs.h Thu Apr 28 13:54:01 2005 +0000 103.2 +++ b/xen/include/asm-x86/x86_64/regs.h Fri Apr 29 07:34:47 2005 +0000 103.3 @@ -17,6 +17,6 @@ 103.4 103.5 /* Number of bytes of on-stack execution state to be context-switched. */ 103.6 /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */ 103.7 -#define CTXT_SWITCH_STACK_BYTES (offsetof(execution_context_t, es)) 103.8 +#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es)) 103.9 103.10 #endif
104.1 --- a/xen/include/asm-x86/x86_emulate.h Thu Apr 28 13:54:01 2005 +0000 104.2 +++ b/xen/include/asm-x86/x86_emulate.h Fri Apr 29 07:34:47 2005 +0000 104.3 @@ -139,7 +139,7 @@ x86_emulate_write_std( 104.4 unsigned long val, 104.5 unsigned int bytes); 104.6 104.7 -struct xen_regs; 104.8 +struct cpu_user_regs; 104.9 104.10 /* 104.11 * x86_emulate_memop: Emulate an instruction that faulted attempting to 104.12 @@ -152,7 +152,7 @@ struct xen_regs; 104.13 */ 104.14 extern int 104.15 x86_emulate_memop( 104.16 - struct xen_regs *regs, 104.17 + struct cpu_user_regs *regs, 104.18 unsigned long cr2, 104.19 struct x86_mem_emulator *ops, 104.20 int mode); 104.21 @@ -164,6 +164,6 @@ x86_emulate_memop( 104.22 */ 104.23 extern void * 104.24 decode_register( 104.25 - u8 modrm_reg, struct xen_regs *regs, int highbyte_regs); 104.26 + u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs); 104.27 104.28 #endif /* __X86_EMULATE_H__ */
105.1 --- a/xen/include/public/arch-ia64.h Thu Apr 28 13:54:01 2005 +0000 105.2 +++ b/xen/include/public/arch-ia64.h Fri Apr 29 07:34:47 2005 +0000 105.3 @@ -22,7 +22,7 @@ typedef unsigned long cpureg_t; /* Ful 105.4 105.5 typedef struct 105.6 { 105.7 -} PACKED execution_context_t; 105.8 +} PACKED struct cpu_user_regs; 105.9 105.10 /* 105.11 * NB. This may become a 64-bit count with no shift. If this happens then the 105.12 @@ -91,9 +91,9 @@ typedef struct { 105.13 * The following is all CPU context. Note that the i387_ctxt block is filled 105.14 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 105.15 */ 105.16 -typedef struct { 105.17 +typedef struct vcpu_guest_context { 105.18 //unsigned long flags; 105.19 -} PACKED full_execution_context_t; 105.20 +} PACKED vcpu_guest_context_t; 105.21 105.22 #endif /* !__ASSEMBLY__ */ 105.23
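[Note] The mechanical rename leaves the ia64 stub above syntactically odd: "typedef struct { } PACKED struct cpu_user_regs;" does not parse as C, since a typedef cannot introduce a struct tag that way. Presumably the intent, mirroring the x86 headers below, is something like:

    /* Presumed intent of the ia64 stub (a sketch; the hunk as committed
     * keeps the typedef keyword and so would not compile). */
    struct cpu_user_regs {
        /* no fields yet on ia64 */
    } PACKED;
    typedef struct cpu_user_regs cpu_user_regs_t;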
106.1 --- a/xen/include/public/arch-x86_32.h Thu Apr 28 13:54:01 2005 +0000 106.2 +++ b/xen/include/public/arch-x86_32.h Fri Apr 29 07:34:47 2005 +0000 106.3 @@ -97,8 +97,7 @@ typedef struct { 106.4 memory_t address; /* 4: code address */ 106.5 } PACKED trap_info_t; /* 8 bytes */ 106.6 106.7 -typedef struct xen_regs 106.8 -{ 106.9 +typedef struct cpu_user_regs { 106.10 u32 ebx; 106.11 u32 ecx; 106.12 u32 edx; 106.13 @@ -117,26 +116,27 @@ typedef struct xen_regs 106.14 u32 ds; 106.15 u32 fs; 106.16 u32 gs; 106.17 -} PACKED execution_context_t; 106.18 +} cpu_user_regs_t; 106.19 106.20 typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ 106.21 106.22 /* 106.23 - * The following is all CPU context. Note that the i387_ctxt block is filled 106.24 + * The following is all CPU context. Note that the fpu_ctxt block is filled 106.25 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 106.26 */ 106.27 -typedef struct { 106.28 -#define ECF_I387_VALID (1<<0) 106.29 -#define ECF_VMX_GUEST (1<<1) 106.30 -#define ECF_IN_KERNEL (1<<2) 106.31 - unsigned long flags; 106.32 - execution_context_t cpu_ctxt; /* User-level CPU registers */ 106.33 - char fpu_ctxt[256]; /* User-level FPU registers */ 106.34 +typedef struct vcpu_guest_context { 106.35 +#define VGCF_I387_VALID (1<<0) 106.36 +#define VGCF_VMX_GUEST (1<<1) 106.37 +#define VGCF_IN_KERNEL (1<<2) 106.38 + unsigned long flags; /* VGCF_* flags */ 106.39 + cpu_user_regs_t user_regs; /* User-level CPU registers */ 106.40 + struct { char x[512]; } fpu_ctxt /* User-level FPU registers */ 106.41 + __attribute__((__aligned__(16))); /* (needs 16-byte alignment) */ 106.42 trap_info_t trap_ctxt[256]; /* Virtual IDT */ 106.43 unsigned int fast_trap_idx; /* "Fast trap" vector offset */ 106.44 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ 106.45 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ 106.46 - unsigned long kernel_ss, kernel_esp; /* Virtual TSS (only SS1/ESP1) */ 106.47 + unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ 106.48 unsigned long pt_base; /* CR3 (pagetable base) */ 106.49 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ 106.50 unsigned long event_callback_cs; /* CS:EIP of event callback */ 106.51 @@ -144,15 +144,15 @@ typedef struct { 106.52 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ 106.53 unsigned long failsafe_callback_eip; 106.54 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ 106.55 -} PACKED full_execution_context_t; 106.56 +} vcpu_guest_context_t; 106.57 106.58 typedef struct { 106.59 /* MFN of a table of MFNs that make up p2m table */ 106.60 u64 pfn_to_mfn_frame_list; 106.61 -} PACKED arch_shared_info_t; 106.62 +} arch_shared_info_t; 106.63 106.64 typedef struct { 106.65 -} PACKED arch_vcpu_info_t; 106.66 +} arch_vcpu_info_t; 106.67 106.68 #define ARCH_HAS_FAST_TRAP 106.69
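[Note] The public x86_32 context is renamed end to end: the ECF_* flags become VGCF_*, cpu_ctxt becomes user_regs, the FPU image becomes an explicitly 16-byte-aligned 512-byte blob, kernel_esp becomes kernel_sp, and the PACKED attributes are dropped from the typedefs. A control-tools-style consumer would presumably fill it along these lines (field names from the hunk above; values are placeholders):

    #include <string.h>
    void build_ctxt_sketch(vcpu_guest_context_t *ctxt,
                           unsigned long eip, unsigned long stack)
    {
        memset(ctxt, 0, sizeof(*ctxt));
        ctxt->flags = VGCF_IN_KERNEL;     /* start in guest kernel mode */
        ctxt->user_regs.eip = eip;        /* was cpu_ctxt.eip */
        ctxt->user_regs.esp = stack;
        ctxt->kernel_sp = stack;          /* was kernel_esp */
    }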
107.1 --- a/xen/include/public/arch-x86_64.h Thu Apr 28 13:54:01 2005 +0000 107.2 +++ b/xen/include/public/arch-x86_64.h Fri Apr 29 07:34:47 2005 +0000 107.3 @@ -101,7 +101,7 @@ 107.4 * int HYPERVISOR_switch_to_user(void) 107.5 * All arguments are on the kernel stack, in the following format. 107.6 * Never returns if successful. Current kernel context is lost. 107.7 - * If flags contains ECF_IN_SYSCALL: 107.8 + * If flags contains VGCF_IN_SYSCALL: 107.9 * Restore RAX, RIP, RFLAGS, RSP. 107.10 * Discard R11, RCX, CS, SS. 107.11 * Otherwise: 107.12 @@ -109,7 +109,7 @@ 107.13 * All other registers are saved on hypercall entry and restored to user. 107.14 */ 107.15 /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ 107.16 -#define ECF_IN_SYSCALL (1<<8) 107.17 +#define VGCF_IN_SYSCALL (1<<8) 107.18 struct switch_to_user { 107.19 /* Top of stack (%rsp at point of hypercall). */ 107.20 u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; 107.21 @@ -142,8 +142,7 @@ typedef struct { 107.22 memory_t address; /* 8: code address */ 107.23 } PACKED trap_info_t; /* 16 bytes */ 107.24 107.25 -typedef struct xen_regs 107.26 -{ 107.27 +typedef struct cpu_user_regs { 107.28 u64 r15; 107.29 u64 r14; 107.30 u64 r13; 107.31 @@ -168,45 +167,47 @@ typedef struct xen_regs 107.32 u64 ss; 107.33 u64 es; 107.34 u64 ds; 107.35 - u64 fs; /* Non-zero => takes precedence over fs_base. */ 107.36 - u64 gs; /* Non-zero => takes precedence over gs_base_app. */ 107.37 - u64 fs_base; 107.38 - u64 gs_base_kernel; 107.39 - u64 gs_base_user; 107.40 -} PACKED execution_context_t; 107.41 + u64 fs; /* Non-zero => takes precedence over fs_base. */ 107.42 + u64 gs; /* Non-zero => takes precedence over gs_base_user. */ 107.43 +} cpu_user_regs_t; 107.44 107.45 typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ 107.46 107.47 /* 107.48 - * The following is all CPU context. Note that the i387_ctxt block is filled 107.49 + * The following is all CPU context. Note that the fpu_ctxt block is filled 107.50 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 
107.51 */ 107.52 -typedef struct { 107.53 -#define ECF_I387_VALID (1<<0) 107.54 -#define ECF_VMX_GUEST (1<<1) 107.55 -#define ECF_IN_KERNEL (1<<2) 107.56 - unsigned long flags; 107.57 - execution_context_t cpu_ctxt; /* User-level CPU registers */ 107.58 - char fpu_ctxt[512]; /* User-level FPU registers */ 107.59 +typedef struct vcpu_guest_context { 107.60 +#define VGCF_I387_VALID (1<<0) 107.61 +#define VGCF_VMX_GUEST (1<<1) 107.62 +#define VGCF_IN_KERNEL (1<<2) 107.63 + unsigned long flags; /* VGCF_* flags */ 107.64 + cpu_user_regs_t user_regs; /* User-level CPU registers */ 107.65 + struct { char x[512]; } fpu_ctxt /* User-level FPU registers */ 107.66 + __attribute__((__aligned__(16))); /* (needs 16-byte alignment) */ 107.67 trap_info_t trap_ctxt[256]; /* Virtual IDT */ 107.68 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ 107.69 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ 107.70 - unsigned long kernel_ss, kernel_esp; /* Virtual TSS (only SS1/ESP1) */ 107.71 + unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ 107.72 unsigned long pt_base; /* CR3 (pagetable base) */ 107.73 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ 107.74 unsigned long event_callback_eip; 107.75 unsigned long failsafe_callback_eip; 107.76 unsigned long syscall_callback_eip; 107.77 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ 107.78 -} PACKED full_execution_context_t; 107.79 + /* Segment base addresses. */ 107.80 + u64 fs_base; 107.81 + u64 gs_base_kernel; 107.82 + u64 gs_base_user; 107.83 +} vcpu_guest_context_t; 107.84 107.85 typedef struct { 107.86 /* MFN of a table of MFNs that make up p2m table */ 107.87 u64 pfn_to_mfn_frame_list; 107.88 -} PACKED arch_shared_info_t; 107.89 +} arch_shared_info_t; 107.90 107.91 typedef struct { 107.92 -} PACKED arch_vcpu_info_t; 107.93 +} arch_vcpu_info_t; 107.94 107.95 #endif /* !__ASSEMBLY__ */ 107.96
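[Note] On x86_64 the segment-base shadows (fs_base, gs_base_kernel, gs_base_user) move out of cpu_user_regs, which now ends at gs, and reappear at the tail of vcpu_guest_context; the stale gs_base_app spelling in the gs comment is corrected to gs_base_user at the same time. Loading them would now read from the guest context, roughly as below (wrmsrl and the Xen-style MSR names are assumptions, not taken from this changeset):

    void load_segment_bases_sketch(const vcpu_guest_context_t *c)
    {
        wrmsrl(MSR_FS_BASE,        c->fs_base);
        wrmsrl(MSR_GS_BASE,        c->gs_base_kernel);
        wrmsrl(MSR_SHADOW_GS_BASE, c->gs_base_user);
    }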
108.1 --- a/xen/include/public/dom0_ops.h Thu Apr 28 13:54:01 2005 +0000 108.2 +++ b/xen/include/public/dom0_ops.h Fri Apr 29 07:34:47 2005 +0000 108.3 @@ -83,7 +83,7 @@ typedef struct { 108.4 #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */ 108.5 #define DOMFLAGS_SHUTDOWNSHIFT 16 108.6 u32 flags; 108.7 - full_execution_context_t *ctxt; /* NB. IN/OUT variable. */ 108.8 + vcpu_guest_context_t *ctxt; /* NB. IN/OUT variable. */ 108.9 memory_t tot_pages; 108.10 memory_t max_pages; 108.11 memory_t shared_info_frame; /* MFN of shared_info struct */ 108.12 @@ -96,7 +96,7 @@ typedef struct { 108.13 domid_t domain; 108.14 u16 exec_domain; 108.15 /* IN/OUT parameters */ 108.16 - full_execution_context_t *ctxt; 108.17 + vcpu_guest_context_t *ctxt; 108.18 } dom0_setdomaininfo_t; 108.19 108.20 #define DOM0_MSR 15
109.1 --- a/xen/include/xen/domain.h Thu Apr 28 13:54:01 2005 +0000 109.2 +++ b/xen/include/xen/domain.h Fri Apr 29 07:34:47 2005 +0000 109.3 @@ -15,7 +15,7 @@ extern void arch_do_createdomain(struct 109.4 extern void arch_do_boot_vcpu(struct exec_domain *ed); 109.5 109.6 extern int arch_set_info_guest( 109.7 - struct exec_domain *d, full_execution_context_t *c); 109.8 + struct exec_domain *d, struct vcpu_guest_context *c); 109.9 109.10 extern void free_perdomain_pt(struct domain *d); 109.11
110.1 --- a/xen/include/xen/irq.h Thu Apr 28 13:54:01 2005 +0000 110.2 +++ b/xen/include/xen/irq.h Fri Apr 29 07:34:47 2005 +0000 110.3 @@ -8,7 +8,7 @@ 110.4 110.5 struct irqaction 110.6 { 110.7 - void (*handler)(int, void *, struct xen_regs *); 110.8 + void (*handler)(int, void *, struct cpu_user_regs *); 110.9 const char *name; 110.10 void *dev_id; 110.11 }; 110.12 @@ -63,7 +63,7 @@ extern int setup_irq(unsigned int, struc 110.13 extern void free_irq(unsigned int); 110.14 110.15 extern hw_irq_controller no_irq_type; 110.16 -extern void no_action(int cpl, void *dev_id, struct xen_regs *regs); 110.17 +extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs); 110.18 110.19 struct domain; 110.20 struct exec_domain;
111.1 --- a/xen/include/xen/keyhandler.h Thu Apr 28 13:54:01 2005 +0000 111.2 +++ b/xen/include/xen/keyhandler.h Fri Apr 29 07:34:47 2005 +0000 111.3 @@ -23,11 +23,11 @@ extern void register_keyhandler( 111.4 * synchronously in hard-IRQ context with interrupts disabled. The @regs 111.5 * callback parameter points at the interrupted register context. 111.6 */ 111.7 -typedef void irq_keyhandler_t(unsigned char key, struct xen_regs *regs); 111.8 +typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs); 111.9 extern void register_irq_keyhandler( 111.10 unsigned char key, irq_keyhandler_t *handler, char *desc); 111.11 111.12 /* Inject a keypress into the key-handling subsystem. */ 111.13 -extern void handle_keypress(unsigned char key, struct xen_regs *regs); 111.14 +extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs); 111.15 111.16 #endif /* __XEN_KEYHANDLER_H__ */
112.1 --- a/xen/include/xen/sched.h Thu Apr 28 13:54:01 2005 +0000 112.2 +++ b/xen/include/xen/sched.h Fri Apr 29 07:34:47 2005 +0000 112.3 @@ -210,7 +210,7 @@ static inline void get_knownalive_domain 112.4 atomic_inc(&d->refcnt); 112.5 ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED)); 112.6 } 112.7 - 112.8 + 112.9 extern struct domain *do_createdomain( 112.10 domid_t dom_id, unsigned int cpu); 112.11 extern int construct_dom0( 112.12 @@ -265,10 +265,15 @@ extern void sync_lazy_execstate_cpuset(u 112.13 extern void sync_lazy_execstate_all(void); 112.14 extern int __sync_lazy_execstate(void); 112.15 112.16 +/* Called by the scheduler to switch to another exec_domain. */ 112.17 extern void context_switch( 112.18 struct exec_domain *prev, 112.19 struct exec_domain *next); 112.20 112.21 +/* Called by the scheduler to continue running the current exec_domain. */ 112.22 +extern void continue_running( 112.23 + struct exec_domain *same); 112.24 + 112.25 void domain_init(void); 112.26 112.27 int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
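[Note] sched.h grows a second scheduler entry point alongside context_switch(): continue_running() lets the scheduler resume the incumbent exec_domain without paying for a full switch. A plausible call-site shape, purely illustrative:

    void schedule_tail_sketch(struct exec_domain *prev,
                              struct exec_domain *next)
    {
        if (next == prev)
            continue_running(prev);      /* nothing to swap */
        else
            context_switch(prev, next);  /* full state switch */
    }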
113.1 --- a/xen/include/xen/serial.h Thu Apr 28 13:54:01 2005 +0000 113.2 +++ b/xen/include/xen/serial.h Fri Apr 29 07:34:47 2005 +0000 113.3 @@ -28,7 +28,7 @@ void serial_init_stage2(void); 113.4 int parse_serial_handle(char *conf); 113.5 113.6 /* Register a character-receive hook on the specified COM port. */ 113.7 -typedef void (*serial_rx_fn)(unsigned char, struct xen_regs *); 113.8 +typedef void (*serial_rx_fn)(unsigned char, struct cpu_user_regs *); 113.9 void serial_set_rx_handler(int handle, serial_rx_fn fn); 113.10 113.11 /* Transmit a single character via the specified COM port. */