ia64/xen-unstable

changeset 9441:8f722ac17efa

Merged.
author emellor@leeni.uk.xensource.com
date Thu Mar 23 17:37:37 2006 +0100 (2006-03-23)
parents 1577043d8e36 bddcfe70fbef
children b7295a83206e
files linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c linux-2.6-xen-sparse/arch/x86_64/kernel/apic-xen.c linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c linux-2.6-xen-sparse/drivers/xen/core/gnttab.c linux-2.6-xen-sparse/drivers/xen/util.c linux-2.6-xen-sparse/include/linux/mm.h linux-2.6-xen-sparse/mm/memory.c xen/arch/x86/dom0_ops.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/common/gdbstub.c xen/drivers/char/ns16550.c xen/drivers/char/serial.c xen/include/asm-x86/hvm/hvm.h xen/include/xen/serial.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c	Thu Mar 23 17:27:48 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c	Thu Mar 23 17:37:37 2006 +0100
     1.3 @@ -32,13 +32,13 @@
     1.4  #endif
     1.5  
     1.6  static int direct_remap_area_pte_fn(pte_t *pte, 
     1.7 -				    struct page *pte_page,
     1.8 +				    struct page *pmd_page,
     1.9  				    unsigned long address, 
    1.10  				    void *data)
    1.11  {
    1.12  	mmu_update_t **v = (mmu_update_t **)data;
    1.13  
    1.14 -	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
    1.15 +	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
    1.16  		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
    1.17  	(*v)++;
    1.18  
    1.19 @@ -67,9 +67,9 @@ static int __direct_remap_pfn_range(stru
    1.20  	for (i = 0; i < size; i += PAGE_SIZE) {
    1.21  		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
    1.22  			/* Fill in the PTE pointers. */
    1.23 -			rc = generic_page_range(mm, start_address, 
    1.24 -						address - start_address,
    1.25 -						direct_remap_area_pte_fn, &w);
    1.26 +			rc = apply_to_page_range(mm, start_address, 
    1.27 +						 address - start_address,
    1.28 +						 direct_remap_area_pte_fn, &w);
    1.29  			if (rc)
    1.30  				goto out;
    1.31  			w = u;
    1.32 @@ -93,8 +93,9 @@ static int __direct_remap_pfn_range(stru
    1.33  
    1.34  	if (v != u) {
    1.35  		/* get the ptep's filled in */
    1.36 -		rc = generic_page_range(mm, start_address, address - start_address,
    1.37 -				   direct_remap_area_pte_fn, &w);
    1.38 +		rc = apply_to_page_range(mm, start_address,
    1.39 +					 address - start_address,
    1.40 +					 direct_remap_area_pte_fn, &w);
    1.41  		if (rc)
    1.42  			goto out;
    1.43  		rc = -EFAULT;
    1.44 @@ -142,11 +143,11 @@ int direct_kernel_remap_pfn_range(unsign
    1.45  EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
    1.46  
    1.47  static int lookup_pte_fn(
    1.48 -	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
    1.49 +	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
    1.50  {
    1.51  	uint64_t *ptep = (uint64_t *)data;
    1.52  	if (ptep)
    1.53 -		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
    1.54 +		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
    1.55  			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
    1.56  	return 0;
    1.57  }
    1.58 @@ -155,13 +156,14 @@ int create_lookup_pte_addr(struct mm_str
    1.59  			   unsigned long address,
    1.60  			   uint64_t *ptep)
    1.61  {
    1.62 -	return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
    1.63 +	return apply_to_page_range(mm, address, PAGE_SIZE,
    1.64 +				   lookup_pte_fn, ptep);
    1.65  }
    1.66  
    1.67  EXPORT_SYMBOL(create_lookup_pte_addr);
    1.68  
    1.69  static int noop_fn(
    1.70 -	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
    1.71 +	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
    1.72  {
    1.73  	return 0;
    1.74  }
    1.75 @@ -170,7 +172,7 @@ int touch_pte_range(struct mm_struct *mm
    1.76  		    unsigned long address,
    1.77  		    unsigned long size)
    1.78  {
    1.79 -	return generic_page_range(mm, address, size, noop_fn, NULL);
    1.80 +	return apply_to_page_range(mm, address, size, noop_fn, NULL);
    1.81  } 
    1.82  
    1.83  EXPORT_SYMBOL(touch_pte_range);
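
The hunks above, together with the include/linux/mm.h and mm/memory.c hunks below, track the rename of generic_page_range() to apply_to_page_range() and of the callback's second parameter from pte_page to pmd_page (the struct page backing the page table, which the Xen callers convert to an MFN). A minimal sketch of a caller after this change follows; count_ptes_fn() and count_kernel_ptes() are hypothetical helpers, not part of this changeset, and the include lines are indicative only.

#include <linux/mm.h>      /* apply_to_page_range(), pte_fn_t (CONFIG_XEN) */
#include <linux/sched.h>   /* init_mm (indicative) */

/* Hypothetical pte_fn_t callback: counts the PTE slots visited. */
static int count_ptes_fn(pte_t *pte, struct page *pmd_page,
                         unsigned long addr, void *data)
{
	unsigned long *count = data;
	(*count)++;
	return 0;               /* a non-zero return aborts the walk */
}

/*
 * Walk a page-aligned region of kernel virtual address space, building
 * page tables as needed -- the same shape as touch_pte_range() above,
 * but reporting how many PTE slots were visited.
 */
static int count_kernel_ptes(unsigned long addr, unsigned long size,
                             unsigned long *count)
{
	*count = 0;
	return apply_to_page_range(&init_mm, addr, size,
	                           count_ptes_fn, count);
}

touch_pte_range() above is the degenerate case of the same pattern: a no-op callback used purely to force page-table construction for a region.
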
     2.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/apic-xen.c	Thu Mar 23 17:27:48 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/apic-xen.c	Thu Mar 23 17:37:37 2006 +0100
     2.3 @@ -114,8 +114,6 @@ void smp_apic_timer_interrupt(struct pt_
     2.4  	irq_exit();
     2.5  }
     2.6  
     2.7 -int __initdata unsync_tsc_on_multicluster;
     2.8 -
     2.9  /*
    2.10   * This interrupt should _never_ happen with our APIC/SMP architecture
    2.11   */
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Thu Mar 23 17:27:48 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Thu Mar 23 17:37:37 2006 +0100
     3.3 @@ -517,7 +517,7 @@ void balloon_update_driver_allowance(lon
     3.4  }
     3.5  
     3.6  static int dealloc_pte_fn(
     3.7 -	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
     3.8 +	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
     3.9  {
    3.10  	unsigned long mfn = pte_mfn(*pte);
    3.11  	int ret;
    3.12 @@ -547,8 +547,8 @@ struct page *balloon_alloc_empty_page_ra
    3.13  	scrub_pages(vstart, 1 << order);
    3.14  
    3.15  	balloon_lock(flags);
    3.16 -	ret = generic_page_range(
    3.17 -		&init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
    3.18 +	ret = apply_to_page_range(&init_mm, vstart,
    3.19 +				  PAGE_SIZE << order, dealloc_pte_fn, NULL);
    3.20  	BUG_ON(ret);
    3.21  	current_pages -= 1UL << order;
    3.22  	totalram_pages = current_pages;
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Thu Mar 23 17:27:48 2006 +0100
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Thu Mar 23 17:37:37 2006 +0100
     4.3 @@ -360,7 +360,7 @@ gnttab_request_free_callback(struct gntt
     4.4  }
     4.5  
     4.6  #ifndef __ia64__
     4.7 -static int map_pte_fn(pte_t *pte, struct page *pte_page,
     4.8 +static int map_pte_fn(pte_t *pte, struct page *pmd_page,
     4.9  		      unsigned long addr, void *data)
    4.10  {
    4.11  	unsigned long **frames = (unsigned long **)data;
    4.12 @@ -370,7 +370,7 @@ static int map_pte_fn(pte_t *pte, struct
    4.13  	return 0;
    4.14  }
    4.15  
    4.16 -static int unmap_pte_fn(pte_t *pte, struct page *pte_page,
    4.17 +static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
    4.18  		      unsigned long addr, void *data)
    4.19  {
    4.20  
    4.21 @@ -384,6 +384,7 @@ gnttab_resume(void)
    4.22  {
    4.23  	gnttab_setup_table_t setup;
    4.24  	unsigned long frames[NR_GRANT_FRAMES];
    4.25 +	int rc;
    4.26  #ifndef __ia64__
    4.27  	void *pframes = frames;
    4.28  	struct vm_struct *area;
    4.29 @@ -393,8 +394,8 @@ gnttab_resume(void)
    4.30  	setup.nr_frames  = NR_GRANT_FRAMES;
    4.31  	setup.frame_list = frames;
    4.32  
    4.33 -	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
    4.34 -	BUG_ON(setup.status != 0);
    4.35 +	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
    4.36 +	BUG_ON(rc || setup.status);
    4.37  
    4.38  #ifndef __ia64__
    4.39  	if (shared == NULL) {
    4.40 @@ -402,9 +403,10 @@ gnttab_resume(void)
    4.41  		BUG_ON(area == NULL);
    4.42  		shared = area->addr;
    4.43  	}
    4.44 -	BUG_ON(generic_page_range(&init_mm, (unsigned long)shared,
    4.45 -				  PAGE_SIZE * NR_GRANT_FRAMES,
    4.46 -				  map_pte_fn, &pframes));
    4.47 +	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
    4.48 +				 PAGE_SIZE * NR_GRANT_FRAMES,
    4.49 +				 map_pte_fn, &pframes);
    4.50 +	BUG_ON(rc);
    4.51  #else
    4.52  	shared = __va(frames[0] << PAGE_SHIFT);
    4.53  	printk("grant table at %p\n", shared);
    4.54 @@ -418,9 +420,9 @@ gnttab_suspend(void)
    4.55  {
    4.56  
    4.57  #ifndef __ia64__
    4.58 -	generic_page_range(&init_mm, (unsigned long)shared,
    4.59 -			   PAGE_SIZE * NR_GRANT_FRAMES,
    4.60 -			   unmap_pte_fn, NULL);
    4.61 +	apply_to_page_range(&init_mm, (unsigned long)shared,
    4.62 +			    PAGE_SIZE * NR_GRANT_FRAMES,
    4.63 +			    unmap_pte_fn, NULL);
    4.64  #endif
    4.65  
    4.66  	return 0;
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/util.c	Thu Mar 23 17:27:48 2006 +0100
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/util.c	Thu Mar 23 17:37:37 2006 +0100
     5.3 @@ -6,9 +6,9 @@
     5.4  #include <asm/uaccess.h>
     5.5  #include <xen/driver_util.h>
     5.6  
     5.7 -static int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
     5.8 +static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
     5.9  {
    5.10 -	/* generic_page_range() does all the hard work. */
    5.11 +	/* apply_to_page_range() does all the hard work. */
    5.12  	return 0;
    5.13  }
    5.14  
    5.15 @@ -24,8 +24,8 @@ struct vm_struct *alloc_vm_area(unsigned
    5.16  	 * This ensures that page tables are constructed for this region
    5.17  	 * of kernel virtual address space and mapped into init_mm.
    5.18  	 */
    5.19 -	if (generic_page_range(&init_mm, (unsigned long)area->addr,
    5.20 -			       area->size, f, NULL)) {
    5.21 +	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
    5.22 +				area->size, f, NULL)) {
    5.23  		free_vm_area(area);
    5.24  		return NULL;
    5.25  	}
     6.1 --- a/linux-2.6-xen-sparse/include/linux/mm.h	Thu Mar 23 17:27:48 2006 +0100
     6.2 +++ b/linux-2.6-xen-sparse/include/linux/mm.h	Thu Mar 23 17:37:37 2006 +0100
     6.3 @@ -1020,10 +1020,10 @@ struct page *follow_page(struct vm_area_
     6.4  #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
     6.5  
     6.6  #ifdef CONFIG_XEN
     6.7 -typedef int (*pte_fn_t)(pte_t *pte, struct page *pte_page, unsigned long addr,
     6.8 -                        void *data);
     6.9 -extern int generic_page_range(struct mm_struct *mm, unsigned long address,
    6.10 -                              unsigned long size, pte_fn_t fn, void *data);
    6.11 +typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
    6.12 +			void *data);
    6.13 +extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
    6.14 +			       unsigned long size, pte_fn_t fn, void *data);
    6.15  #endif
    6.16  
    6.17  #ifdef CONFIG_PROC_FS
     7.1 --- a/linux-2.6-xen-sparse/mm/memory.c	Thu Mar 23 17:27:48 2006 +0100
     7.2 +++ b/linux-2.6-xen-sparse/mm/memory.c	Thu Mar 23 17:37:37 2006 +0100
     7.3 @@ -1378,36 +1378,39 @@ int remap_pfn_range(struct vm_area_struc
     7.4  EXPORT_SYMBOL(remap_pfn_range);
     7.5  
     7.6  #ifdef CONFIG_XEN
     7.7 -static inline int generic_pte_range(struct mm_struct *mm, pmd_t *pmd,
     7.8 -				    unsigned long addr, unsigned long end,
     7.9 -				    pte_fn_t fn, void *data)
    7.10 +static inline int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
    7.11 +				     unsigned long addr, unsigned long end,
    7.12 +				     pte_fn_t fn, void *data)
    7.13  {
    7.14  	pte_t *pte;
    7.15  	int err;
    7.16 -	struct page *pte_page;
    7.17 +	struct page *pmd_page;
    7.18 +	spinlock_t *ptl;
    7.19  
    7.20  	pte = (mm == &init_mm) ?
    7.21  		pte_alloc_kernel(pmd, addr) :
    7.22 -		pte_alloc_map(mm, pmd, addr);
    7.23 +		pte_alloc_map_lock(mm, pmd, addr, &ptl);
    7.24  	if (!pte)
    7.25  		return -ENOMEM;
    7.26  
    7.27 -	pte_page = pmd_page(*pmd);
    7.28 +	BUG_ON(pmd_huge(*pmd));
    7.29 +
    7.30 +	pmd_page = pmd_page(*pmd);
    7.31  
    7.32  	do {
    7.33 -		err = fn(pte, pte_page, addr, data);
    7.34 +		err = fn(pte, pmd_page, addr, data);
    7.35  		if (err)
    7.36  			break;
    7.37  	} while (pte++, addr += PAGE_SIZE, addr != end);
    7.38  
    7.39  	if (mm != &init_mm)
    7.40 -		pte_unmap(pte-1);
    7.41 +		pte_unmap_unlock(pte-1, ptl);
    7.42  	return err;
    7.43  }
    7.44  
    7.45 -static inline int generic_pmd_range(struct mm_struct *mm, pud_t *pud,
    7.46 -				    unsigned long addr, unsigned long end,
    7.47 -				    pte_fn_t fn, void *data)
    7.48 +static inline int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
    7.49 +				     unsigned long addr, unsigned long end,
    7.50 +				     pte_fn_t fn, void *data)
    7.51  {
    7.52  	pmd_t *pmd;
    7.53  	unsigned long next;
    7.54 @@ -1418,16 +1421,16 @@ static inline int generic_pmd_range(stru
    7.55  		return -ENOMEM;
    7.56  	do {
    7.57  		next = pmd_addr_end(addr, end);
    7.58 -		err = generic_pte_range(mm, pmd, addr, next, fn, data);
    7.59 +		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
    7.60  		if (err)
    7.61  			break;
    7.62  	} while (pmd++, addr = next, addr != end);
    7.63  	return err;
    7.64  }
    7.65  
    7.66 -static inline int generic_pud_range(struct mm_struct *mm, pgd_t *pgd,
    7.67 -				    unsigned long addr, unsigned long end,
    7.68 -				    pte_fn_t fn, void *data)
    7.69 +static inline int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
    7.70 +				     unsigned long addr, unsigned long end,
    7.71 +				     pte_fn_t fn, void *data)
    7.72  {
    7.73  	pud_t *pud;
    7.74  	unsigned long next;
    7.75 @@ -1438,7 +1441,7 @@ static inline int generic_pud_range(stru
    7.76  		return -ENOMEM;
    7.77  	do {
    7.78  		next = pud_addr_end(addr, end);
    7.79 -		err = generic_pmd_range(mm, pud, addr, next, fn, data);
    7.80 +		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
    7.81  		if (err)
    7.82  			break;
    7.83  	} while (pud++, addr = next, addr != end);
    7.84 @@ -1449,8 +1452,8 @@ static inline int generic_pud_range(stru
    7.85   * Scan a region of virtual memory, filling in page tables as necessary
    7.86   * and calling a provided function on each leaf page table.
    7.87   */
    7.88 -int generic_page_range(struct mm_struct *mm, unsigned long addr,
    7.89 -		       unsigned long size, pte_fn_t fn, void *data)
    7.90 +int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
    7.91 +			unsigned long size, pte_fn_t fn, void *data)
    7.92  {
    7.93  	pgd_t *pgd;
    7.94  	unsigned long next;
    7.95 @@ -1461,12 +1464,13 @@ int generic_page_range(struct mm_struct 
    7.96  	pgd = pgd_offset(mm, addr);
    7.97  	do {
    7.98  		next = pgd_addr_end(addr, end);
    7.99 -		err = generic_pud_range(mm, pgd, addr, next, fn, data);
   7.100 +		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
   7.101  		if (err)
   7.102  			break;
   7.103  	} while (pgd++, addr = next, addr != end);
   7.104  	return err;
   7.105  }
   7.106 +EXPORT_SYMBOL_GPL(apply_to_page_range);
   7.107  #endif
   7.108  
   7.109  /*
     8.1 --- a/xen/arch/x86/dom0_ops.c	Thu Mar 23 17:27:48 2006 +0100
     8.2 +++ b/xen/arch/x86/dom0_ops.c	Thu Mar 23 17:37:37 2006 +0100
     8.3 @@ -460,8 +460,7 @@ void arch_getdomaininfo_ctxt(
     8.4  
     8.5      if ( hvm_guest(v) )
     8.6      {
     8.7 -        hvm_store_cpu_guest_regs(v, &c->user_regs);
     8.8 -        hvm_store_cpu_guest_ctrl_regs(v, c->ctrlreg);
     8.9 +        hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
    8.10      }
    8.11      else
    8.12      {
     9.1 --- a/xen/arch/x86/hvm/platform.c	Thu Mar 23 17:27:48 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Mar 23 17:37:37 2006 +0100
     9.3 @@ -773,7 +773,7 @@ void handle_mmio(unsigned long va, unsig
     9.4      mmio_opp = &v->arch.hvm_vcpu.mmio_op;
     9.5  
     9.6      regs = mmio_opp->inst_decoder_regs;
     9.7 -    hvm_store_cpu_guest_regs(v, regs);
     9.8 +    hvm_store_cpu_guest_regs(v, regs, NULL);
     9.9  
    9.10      if ((inst_len = hvm_instruction_length(v)) <= 0) {
    9.11          printf("handle_mmio: failed to get instruction length\n");
    10.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 17:27:48 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 17:37:37 2006 +0100
    10.3 @@ -201,31 +201,41 @@ int svm_initialize_guest_resources(struc
    10.4  }
    10.5  
    10.6  static void svm_store_cpu_guest_regs(
    10.7 -    struct vcpu *v, struct cpu_user_regs *regs)
    10.8 +    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
    10.9  {
   10.10      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   10.11  
   10.12 +    if ( regs != NULL )
   10.13 +    {
   10.14  #if defined (__x86_64__)
   10.15 -    regs->rip    = vmcb->rip;
   10.16 -    regs->rsp    = vmcb->rsp;
   10.17 -    regs->rflags = vmcb->rflags;
   10.18 -    regs->cs     = vmcb->cs.sel;
   10.19 -    regs->ds     = vmcb->ds.sel;
   10.20 -    regs->es     = vmcb->es.sel;
   10.21 -    regs->ss     = vmcb->ss.sel;
   10.22 -    regs->gs     = vmcb->gs.sel;
   10.23 -    regs->fs     = vmcb->fs.sel;
   10.24 +        regs->rip    = vmcb->rip;
   10.25 +        regs->rsp    = vmcb->rsp;
   10.26 +        regs->rflags = vmcb->rflags;
   10.27 +        regs->cs     = vmcb->cs.sel;
   10.28 +        regs->ds     = vmcb->ds.sel;
   10.29 +        regs->es     = vmcb->es.sel;
   10.30 +        regs->ss     = vmcb->ss.sel;
   10.31 +        regs->gs     = vmcb->gs.sel;
   10.32 +        regs->fs     = vmcb->fs.sel;
   10.33  #elif defined (__i386__)
   10.34 -    regs->eip    = vmcb->rip;
   10.35 -    regs->esp    = vmcb->rsp;
   10.36 -    regs->eflags = vmcb->rflags;
   10.37 -    regs->cs     = vmcb->cs.sel;
   10.38 -    regs->ds     = vmcb->ds.sel;
   10.39 -    regs->es     = vmcb->es.sel;
   10.40 -    regs->ss     = vmcb->ss.sel;
   10.41 -    regs->gs     = vmcb->gs.sel;
   10.42 -    regs->fs     = vmcb->fs.sel;
   10.43 +        regs->eip    = vmcb->rip;
   10.44 +        regs->esp    = vmcb->rsp;
   10.45 +        regs->eflags = vmcb->rflags;
   10.46 +        regs->cs     = vmcb->cs.sel;
   10.47 +        regs->ds     = vmcb->ds.sel;
   10.48 +        regs->es     = vmcb->es.sel;
   10.49 +        regs->ss     = vmcb->ss.sel;
   10.50 +        regs->gs     = vmcb->gs.sel;
   10.51 +        regs->fs     = vmcb->fs.sel;
   10.52  #endif
   10.53 +    }
   10.54 +
   10.55 +    if ( crs != NULL )
   10.56 +    {
   10.57 +        crs[0] = vmcb->cr0;
   10.58 +        crs[3] = vmcb->cr3;
   10.59 +        crs[4] = vmcb->cr4;
   10.60 +    }
   10.61  }
   10.62  
   10.63  static void svm_load_cpu_guest_regs(
   10.64 @@ -372,15 +382,6 @@ static inline int long_mode_do_msr_write
   10.65      return 1;
   10.66  }
   10.67  
   10.68 -void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
   10.69 -{
   10.70 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   10.71 -
   10.72 -    crs[0] = vmcb->cr0;
   10.73 -    crs[3] = vmcb->cr3;
   10.74 -    crs[4] = vmcb->cr4;
   10.75 -}
   10.76 -
   10.77  void svm_modify_guest_state(struct vcpu *v)
   10.78  {
   10.79      svm_modify_vmcb(v, &v->arch.guest_context.user_regs);
   10.80 @@ -448,7 +449,6 @@ int start_svm(void)
   10.81      hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
   10.82      hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
   10.83  
   10.84 -    hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
   10.85      hvm_funcs.modify_guest_state = svm_modify_guest_state;
   10.86  
   10.87      hvm_funcs.realmode = svm_realmode;
    11.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 23 17:27:48 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 23 17:37:37 2006 +0100
    11.3 @@ -398,31 +398,81 @@ void vmx_migrate_timers(struct vcpu *v)
    11.4          migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
    11.5  }
    11.6  
    11.7 -void vmx_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
    11.8 +struct vmx_store_cpu_guest_regs_callback_info {
    11.9 +    struct vcpu *v;
   11.10 +    struct cpu_user_regs *regs;
   11.11 +    unsigned long *crs;
   11.12 +};
   11.13 +
   11.14 +static void vmx_store_cpu_guest_regs(
   11.15 +    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
   11.16 +
   11.17 +static void vmx_store_cpu_guest_regs_callback(void *data)
   11.18  {
   11.19 +    struct vmx_store_cpu_guest_regs_callback_info *info = data;
   11.20 +    vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
   11.21 +}
   11.22 +
   11.23 +static void vmx_store_cpu_guest_regs(
   11.24 +    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
   11.25 +{
   11.26 +    if ( v != current )
   11.27 +    {
   11.28 +        /* Non-current VCPUs must be paused to get a register snapshot. */
   11.29 +        ASSERT(atomic_read(&v->pausecnt) != 0);
   11.30 +
   11.31 +        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
   11.32 +        {
   11.33 +            /* Get register details from remote CPU. */
   11.34 +            struct vmx_store_cpu_guest_regs_callback_info info = {
   11.35 +                .v = v, .regs = regs, .crs = crs };
   11.36 +            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
   11.37 +            on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
   11.38 +                             &info, 1, 1);
   11.39 +            return;
   11.40 +        }
   11.41 +
   11.42 +        /* Register details are on this CPU. Load the correct VMCS. */
   11.43 +        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
   11.44 +    }
   11.45 +
   11.46 +    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
   11.47 +
   11.48 +    if ( regs != NULL )
   11.49 +    {
   11.50  #if defined (__x86_64__)
   11.51 -    __vmread(GUEST_RFLAGS, &regs->rflags);
   11.52 -    __vmread(GUEST_SS_SELECTOR, &regs->ss);
   11.53 -    __vmread(GUEST_CS_SELECTOR, &regs->cs);
   11.54 -    __vmread(GUEST_DS_SELECTOR, &regs->ds);
   11.55 -    __vmread(GUEST_ES_SELECTOR, &regs->es);
   11.56 -    __vmread(GUEST_GS_SELECTOR, &regs->gs);
   11.57 -    __vmread(GUEST_FS_SELECTOR, &regs->fs);
   11.58 -    __vmread(GUEST_RIP, &regs->rip);
   11.59 -    __vmread(GUEST_RSP, &regs->rsp);
   11.60 +        __vmread(GUEST_RFLAGS, &regs->rflags);
   11.61 +        __vmread(GUEST_SS_SELECTOR, &regs->ss);
   11.62 +        __vmread(GUEST_CS_SELECTOR, &regs->cs);
   11.63 +        __vmread(GUEST_DS_SELECTOR, &regs->ds);
   11.64 +        __vmread(GUEST_ES_SELECTOR, &regs->es);
   11.65 +        __vmread(GUEST_GS_SELECTOR, &regs->gs);
   11.66 +        __vmread(GUEST_FS_SELECTOR, &regs->fs);
   11.67 +        __vmread(GUEST_RIP, &regs->rip);
   11.68 +        __vmread(GUEST_RSP, &regs->rsp);
   11.69  #elif defined (__i386__)
   11.70 -    __vmread(GUEST_RFLAGS, &regs->eflags);
   11.71 -    __vmread(GUEST_SS_SELECTOR, &regs->ss);
   11.72 -    __vmread(GUEST_CS_SELECTOR, &regs->cs);
   11.73 -    __vmread(GUEST_DS_SELECTOR, &regs->ds);
   11.74 -    __vmread(GUEST_ES_SELECTOR, &regs->es);
   11.75 -    __vmread(GUEST_GS_SELECTOR, &regs->gs);
   11.76 -    __vmread(GUEST_FS_SELECTOR, &regs->fs);
   11.77 -    __vmread(GUEST_RIP, &regs->eip);
   11.78 -    __vmread(GUEST_RSP, &regs->esp);
   11.79 -#else
   11.80 -#error Unsupported architecture
   11.81 +        __vmread(GUEST_RFLAGS, &regs->eflags);
   11.82 +        __vmread(GUEST_SS_SELECTOR, &regs->ss);
   11.83 +        __vmread(GUEST_CS_SELECTOR, &regs->cs);
   11.84 +        __vmread(GUEST_DS_SELECTOR, &regs->ds);
   11.85 +        __vmread(GUEST_ES_SELECTOR, &regs->es);
   11.86 +        __vmread(GUEST_GS_SELECTOR, &regs->gs);
   11.87 +        __vmread(GUEST_FS_SELECTOR, &regs->fs);
   11.88 +        __vmread(GUEST_RIP, &regs->eip);
   11.89 +        __vmread(GUEST_RSP, &regs->esp);
   11.90  #endif
   11.91 +    }
   11.92 +
   11.93 +    if ( crs != NULL )
   11.94 +    {
   11.95 +        __vmread(CR0_READ_SHADOW, &crs[0]);
   11.96 +        __vmread(GUEST_CR3, &crs[3]);
   11.97 +        __vmread(CR4_READ_SHADOW, &crs[4]);
   11.98 +    }
   11.99 +
  11.100 +    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
  11.101 +    if ( (v != current) && hvm_guest(current) )
  11.102 +        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
  11.103  }
  11.104  
  11.105  void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
  11.106 @@ -456,13 +506,6 @@ void vmx_load_cpu_guest_regs(struct vcpu
  11.107  #endif
  11.108  }
  11.109  
  11.110 -void vmx_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
  11.111 -{
  11.112 -    __vmread(CR0_READ_SHADOW, &crs[0]);
  11.113 -    __vmread(GUEST_CR3, &crs[3]);
  11.114 -    __vmread(CR4_READ_SHADOW, &crs[4]);
  11.115 -}
  11.116 -
  11.117  void vmx_modify_guest_state(struct vcpu *v)
  11.118  {
  11.119      modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
  11.120 @@ -616,7 +659,6 @@ int start_vmx(void)
  11.121      hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
  11.122      hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
  11.123  
  11.124 -    hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
  11.125      hvm_funcs.modify_guest_state = vmx_modify_guest_state;
  11.126  
  11.127      hvm_funcs.realmode = vmx_realmode;
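
The vmx.c hunks above let vmx_store_cpu_guest_regs() snapshot a non-current VCPU by bundling its arguments into a callback-info structure and running the same function, synchronously, on the CPU that last launched the VMCS. The fragment below isolates that "run on one specific CPU and wait" pattern; remote_read_info, read_remote_state() and its processor-id payload are hypothetical stand-ins, and the include lines are indicative only.

#include <xen/sched.h>     /* struct vcpu        (indicative) */
#include <xen/smp.h>       /* on_selected_cpus() (indicative) */
#include <xen/cpumask.h>   /* cpumask_of_cpu()   (indicative) */

struct remote_read_info {
    struct vcpu   *v;      /* whose state we want          */
    unsigned long  value;  /* filled in on the owning CPU  */
};

/* Runs on the selected CPU; the payload here is a trivial stand-in. */
static void read_remote_state(void *data)
{
    struct remote_read_info *info = data;
    info->value = (unsigned long)info->v->processor;
}

static void read_on_launch_cpu(struct vcpu *v, struct remote_read_info *info)
{
    cpumask_t mask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);

    info->v = v;
    /* retry=1, wait=1: block until the remote handler has completed,
     * exactly as vmx_store_cpu_guest_regs() does in the hunk above. */
    on_selected_cpus(mask, read_remote_state, info, 1, 1);
}

The synchronous wait is what allows the hunk above to hand back register values that were read with __vmread() on the remote CPU.
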
    12.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Mar 23 17:27:48 2006 +0100
    12.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Mar 23 17:37:37 2006 +0100
    12.3 @@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs
    12.4      if ( hvm_guest(current) && guest_mode(regs) )
    12.5      {
    12.6          context = "hvm";
    12.7 -        hvm_store_cpu_guest_regs(current, &fault_regs);
    12.8 -        hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
    12.9 +        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
   12.10      }
   12.11      else
   12.12      {
    13.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Mar 23 17:27:48 2006 +0100
    13.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Mar 23 17:37:37 2006 +0100
    13.3 @@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs
    13.4      if ( hvm_guest(current) && guest_mode(regs) )
    13.5      {
    13.6          context = "hvm";
    13.7 -        hvm_store_cpu_guest_regs(current, &fault_regs);
    13.8 -        hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
    13.9 +        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
   13.10      }
   13.11      else
   13.12      {
    14.1 --- a/xen/common/gdbstub.c	Thu Mar 23 17:27:48 2006 +0100
    14.2 +++ b/xen/common/gdbstub.c	Thu Mar 23 17:37:37 2006 +0100
    14.3 @@ -562,6 +562,7 @@ initialise_gdb(void)
    14.4      gdb_ctx->serhnd = serial_parse_handle(opt_gdb);
    14.5      if ( gdb_ctx->serhnd != -1 )
    14.6          printk("GDB stub initialised.\n");
    14.7 +    serial_start_sync(gdb_ctx->serhnd);
    14.8  }
    14.9  
   14.10  /*
    15.1 --- a/xen/drivers/char/ns16550.c	Thu Mar 23 17:27:48 2006 +0100
    15.2 +++ b/xen/drivers/char/ns16550.c	Thu Mar 23 17:37:37 2006 +0100
    15.3 @@ -121,8 +121,11 @@ static void ns16550_interrupt(
    15.4  
    15.5      while ( !(ns_read_reg(uart, IIR) & IIR_NOINT) )
    15.6      {
    15.7 -        serial_tx_interrupt(port, regs);
    15.8 -        serial_rx_interrupt(port, regs);
    15.9 +        char lsr = ns_read_reg(uart, LSR);
   15.10 +        if ( lsr & LSR_THRE )
   15.11 +            serial_tx_interrupt(port, regs);
   15.12 +        if ( lsr & LSR_DR )
   15.13 +            serial_rx_interrupt(port, regs);
   15.14      }
   15.15  }
   15.16  
    16.1 --- a/xen/drivers/char/serial.c	Thu Mar 23 17:27:48 2006 +0100
    16.2 +++ b/xen/drivers/char/serial.c	Thu Mar 23 17:37:37 2006 +0100
    16.3 @@ -7,6 +7,7 @@
    16.4   */
    16.5  
    16.6  #include <xen/config.h>
    16.7 +#include <xen/delay.h>
    16.8  #include <xen/init.h>
    16.9  #include <xen/irq.h>
   16.10  #include <xen/keyhandler.h> 
   16.11 @@ -15,8 +16,8 @@
   16.12  #include <xen/serial.h>
   16.13  
   16.14  static struct serial_port com[2] = {
   16.15 -    { .lock = SPIN_LOCK_UNLOCKED }, 
   16.16 -    { .lock = SPIN_LOCK_UNLOCKED }
   16.17 +    { .rx_lock = SPIN_LOCK_UNLOCKED, .tx_lock = SPIN_LOCK_UNLOCKED }, 
   16.18 +    { .rx_lock = SPIN_LOCK_UNLOCKED, .tx_lock = SPIN_LOCK_UNLOCKED }
   16.19  };
   16.20  
   16.21  void serial_rx_interrupt(struct serial_port *port, struct cpu_user_regs *regs)
   16.22 @@ -25,7 +26,7 @@ void serial_rx_interrupt(struct serial_p
   16.23      serial_rx_fn fn = NULL;
   16.24      unsigned long flags;
   16.25  
   16.26 -    spin_lock_irqsave(&port->lock, flags);
   16.27 +    spin_lock_irqsave(&port->rx_lock, flags);
   16.28  
   16.29      if ( port->driver->getc(port, &c) )
   16.30      {
   16.31 @@ -39,7 +40,7 @@ void serial_rx_interrupt(struct serial_p
   16.32              port->rxbuf[MASK_SERIAL_RXBUF_IDX(port->rxbufp++)] = c;            
   16.33      }
   16.34  
   16.35 -    spin_unlock_irqrestore(&port->lock, flags);
   16.36 +    spin_unlock_irqrestore(&port->rx_lock, flags);
   16.37  
   16.38      if ( fn != NULL )
   16.39          (*fn)(c & 0x7f, regs);
   16.40 @@ -50,7 +51,19 @@ void serial_tx_interrupt(struct serial_p
   16.41      int i;
   16.42      unsigned long flags;
   16.43  
   16.44 -    spin_lock_irqsave(&port->lock, flags);
   16.45 +    local_irq_save(flags);
   16.46 +
   16.47 +    /*
   16.48 +     * Avoid spinning for a long time: if there is a long-term lock holder
   16.49 +     * then we know that they'll be stuffing bytes into the transmitter which
   16.50 +     * will therefore not be empty for long.
   16.51 +     */
   16.52 +    while ( !spin_trylock(&port->tx_lock) )
   16.53 +    {
   16.54 +        if ( !port->driver->tx_empty(port) )
   16.55 +            return;
   16.56 +        cpu_relax();
   16.57 +    }
   16.58  
   16.59      if ( port->driver->tx_empty(port) )
   16.60      {
   16.61 @@ -63,7 +76,7 @@ void serial_tx_interrupt(struct serial_p
   16.62          }
   16.63      }
   16.64  
   16.65 -    spin_unlock_irqrestore(&port->lock, flags);
   16.66 +    spin_unlock_irqrestore(&port->tx_lock, flags);
   16.67  }
   16.68  
   16.69  static void __serial_putc(struct serial_port *port, char c)
   16.70 @@ -117,7 +130,7 @@ void serial_putc(int handle, char c)
   16.71      if ( (handle == -1) || !port->driver || !port->driver->putc )
   16.72          return;
   16.73  
   16.74 -    spin_lock_irqsave(&port->lock, flags);
   16.75 +    spin_lock_irqsave(&port->tx_lock, flags);
   16.76  
   16.77      if ( (c == '\n') && (handle & SERHND_COOKED) )
   16.78          __serial_putc(port, '\r');
   16.79 @@ -129,7 +142,7 @@ void serial_putc(int handle, char c)
   16.80  
   16.81      __serial_putc(port, c);
   16.82  
   16.83 -    spin_unlock_irqrestore(&port->lock, flags);
   16.84 +    spin_unlock_irqrestore(&port->tx_lock, flags);
   16.85  }
   16.86  
   16.87  void serial_puts(int handle, const char *s)
   16.88 @@ -141,7 +154,7 @@ void serial_puts(int handle, const char 
   16.89      if ( (handle == -1) || !port->driver || !port->driver->putc )
   16.90          return;
   16.91  
   16.92 -    spin_lock_irqsave(&port->lock, flags);
   16.93 +    spin_lock_irqsave(&port->tx_lock, flags);
   16.94  
   16.95      while ( (c = *s++) != '\0' )
   16.96      {
   16.97 @@ -156,7 +169,7 @@ void serial_puts(int handle, const char 
   16.98          __serial_putc(port, c);
   16.99      }
  16.100  
  16.101 -    spin_unlock_irqrestore(&port->lock, flags);
  16.102 +    spin_unlock_irqrestore(&port->tx_lock, flags);
  16.103  }
  16.104  
  16.105  char serial_getc(int handle)
  16.106 @@ -168,27 +181,28 @@ char serial_getc(int handle)
  16.107      if ( (handle == -1) || !port->driver || !port->driver->getc )
  16.108          return '\0';
  16.109  
  16.110 -    do {        
  16.111 +    do {
  16.112          for ( ; ; )
  16.113          {
  16.114 -            spin_lock_irqsave(&port->lock, flags);
  16.115 +            spin_lock_irqsave(&port->rx_lock, flags);
  16.116              
  16.117              if ( port->rxbufp != port->rxbufc )
  16.118              {
  16.119                  c = port->rxbuf[MASK_SERIAL_RXBUF_IDX(port->rxbufc++)];
  16.120 -                spin_unlock_irqrestore(&port->lock, flags);
  16.121 +                spin_unlock_irqrestore(&port->rx_lock, flags);
  16.122                  break;
  16.123              }
  16.124              
  16.125              if ( port->driver->getc(port, &c) )
  16.126              {
  16.127 -                spin_unlock_irqrestore(&port->lock, flags);
  16.128 +                spin_unlock_irqrestore(&port->rx_lock, flags);
  16.129                  break;
  16.130              }
  16.131  
  16.132 -            spin_unlock_irqrestore(&port->lock, flags);
  16.133 +            spin_unlock_irqrestore(&port->rx_lock, flags);
  16.134  
  16.135              cpu_relax();
  16.136 +            udelay(100);
  16.137          }
  16.138      } while ( ((handle & SERHND_LO) &&  (c & 0x80)) ||
  16.139                ((handle & SERHND_HI) && !(c & 0x80)) );
  16.140 @@ -241,7 +255,7 @@ void serial_set_rx_handler(int handle, s
  16.141      if ( handle == -1 )
  16.142          return;
  16.143  
  16.144 -    spin_lock_irqsave(&port->lock, flags);
  16.145 +    spin_lock_irqsave(&port->rx_lock, flags);
  16.146  
  16.147      if ( port->rx != NULL )
  16.148          goto fail;
  16.149 @@ -265,11 +279,11 @@ void serial_set_rx_handler(int handle, s
  16.150          port->rx = fn;
  16.151      }
  16.152  
  16.153 -    spin_unlock_irqrestore(&port->lock, flags);
  16.154 +    spin_unlock_irqrestore(&port->rx_lock, flags);
  16.155      return;
  16.156  
  16.157   fail:
  16.158 -    spin_unlock_irqrestore(&port->lock, flags);
  16.159 +    spin_unlock_irqrestore(&port->rx_lock, flags);
  16.160      printk("ERROR: Conflicting receive handlers for COM%d\n", 
  16.161             handle & SERHND_IDX);
  16.162  }
  16.163 @@ -277,8 +291,13 @@ void serial_set_rx_handler(int handle, s
  16.164  void serial_force_unlock(int handle)
  16.165  {
  16.166      struct serial_port *port = &com[handle & SERHND_IDX];
  16.167 -    if ( handle != -1 )
  16.168 -        port->lock = SPIN_LOCK_UNLOCKED;
  16.169 +
  16.170 +    if ( handle == -1 )
  16.171 +        return;
  16.172 +
  16.173 +    port->rx_lock = SPIN_LOCK_UNLOCKED;
  16.174 +    port->tx_lock = SPIN_LOCK_UNLOCKED;
  16.175 +
  16.176      serial_start_sync(handle);
  16.177  }
  16.178  
  16.179 @@ -290,7 +309,7 @@ void serial_start_sync(int handle)
  16.180      if ( handle == -1 )
  16.181          return;
  16.182      
  16.183 -    spin_lock_irqsave(&port->lock, flags);
  16.184 +    spin_lock_irqsave(&port->tx_lock, flags);
  16.185  
  16.186      if ( port->sync++ == 0 )
  16.187      {
  16.188 @@ -303,7 +322,7 @@ void serial_start_sync(int handle)
  16.189          }
  16.190      }
  16.191  
  16.192 -    spin_unlock_irqrestore(&port->lock, flags);
  16.193 +    spin_unlock_irqrestore(&port->tx_lock, flags);
  16.194  }
  16.195  
  16.196  void serial_end_sync(int handle)
  16.197 @@ -314,11 +333,11 @@ void serial_end_sync(int handle)
  16.198      if ( handle == -1 )
  16.199          return;
  16.200      
  16.201 -    spin_lock_irqsave(&port->lock, flags);
  16.202 +    spin_lock_irqsave(&port->tx_lock, flags);
  16.203  
  16.204      port->sync--;
  16.205  
  16.206 -    spin_unlock_irqrestore(&port->lock, flags);
  16.207 +    spin_unlock_irqrestore(&port->tx_lock, flags);
  16.208  }
  16.209  
  16.210  int serial_tx_space(int handle)
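
The serial.c hunks above and the serial.h hunk below split the single port->lock into rx_lock and tx_lock, and serial_tx_interrupt() now takes tx_lock with a try-lock loop so the interrupt handler never spins for long behind a writer that is already feeding the transmitter. A standalone sketch of that idiom follows, assuming the struct serial_port fields introduced in this changeset; example_tx_interrupt() is hypothetical, the include lines are indicative only, and unlike the hunk above (which is reached only from the UART ISR with interrupts already off) it restores the saved flags on the early-exit path.

#include <xen/serial.h>    /* struct serial_port (indicative) */
#include <xen/spinlock.h>  /* spin_trylock()     (indicative) */

/* Hypothetical transmit-interrupt body using the same try-lock idiom. */
static void example_tx_interrupt(struct serial_port *port)
{
    unsigned long flags;

    local_irq_save(flags);

    /*
     * If another CPU already holds tx_lock it is stuffing bytes into the
     * transmitter, so the "transmitter empty" condition that raised this
     * interrupt will clear by itself; bail out rather than spin behind it.
     */
    while ( !spin_trylock(&port->tx_lock) )
    {
        if ( !port->driver->tx_empty(port) )
        {
            local_irq_restore(flags);
            return;
        }
        cpu_relax();
    }

    if ( port->driver->tx_empty(port) )
    {
        /* ...hand pending output bytes to port->driver->putc() here... */
    }

    spin_unlock_irqrestore(&port->tx_lock, flags);
}
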
    17.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Mar 23 17:27:48 2006 +0100
    17.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Thu Mar 23 17:37:37 2006 +0100
    17.3 @@ -41,12 +41,12 @@ struct hvm_function_table {
    17.4      /*
    17.5       * Store and load guest state:
    17.6       * 1) load/store guest register state,
    17.7 -     * 2) store guest control register state (used for panic dumps),
    17.8 -     * 3) modify guest state (e.g., set debug flags).
    17.9 +     * 2) modify guest state (e.g., set debug flags).
   17.10       */
   17.11 -    void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
   17.12 -    void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
   17.13 -    void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
   17.14 +    void (*store_cpu_guest_regs)(
   17.15 +        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
   17.16 +    void (*load_cpu_guest_regs)(
   17.17 +        struct vcpu *v, struct cpu_user_regs *r);
   17.18      void (*modify_guest_state)(struct vcpu *v);
   17.19  
   17.20      /*
   17.21 @@ -93,9 +93,10 @@ hvm_relinquish_guest_resources(struct do
   17.22  }
   17.23  
   17.24  static inline void
   17.25 -hvm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
   17.26 +hvm_store_cpu_guest_regs(
   17.27 +    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
   17.28  {
   17.29 -    hvm_funcs.store_cpu_guest_regs(v, r);
   17.30 +    hvm_funcs.store_cpu_guest_regs(v, r, crs);
   17.31  }
   17.32  
   17.33  static inline void
   17.34 @@ -105,12 +106,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
   17.35  }
   17.36  
   17.37  static inline void
   17.38 -hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
   17.39 -{
   17.40 -    hvm_funcs.store_cpu_guest_ctrl_regs(v, crs);
   17.41 -}
   17.42 -
   17.43 -static inline void
   17.44  hvm_modify_guest_state(struct vcpu *v)
   17.45  {
   17.46      hvm_funcs.modify_guest_state(v);
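
The hvm.h hunk above, with the svm.c, vmx.c, dom0_ops.c, platform.c and traps.c hunks earlier in this changeset, folds store_cpu_guest_ctrl_regs() into store_cpu_guest_regs(), whose new third argument is an optional pointer to a control-register array (only crs[0], crs[3] and crs[4] are filled in). A sketch of how callers use the consolidated hook follows; dump_hvm_vcpu() is a hypothetical helper, the include lines are indicative only, and v is assumed to be current or paused (the vmx implementation asserts this for non-current VCPUs).

#include <xen/lib.h>        /* printk()      (indicative) */
#include <xen/sched.h>      /* struct vcpu   (indicative) */
#include <asm/hvm/hvm.h>    /* hvm_store_cpu_guest_regs() */

/* Hypothetical debugging helper showing both ways to call the hook. */
static void dump_hvm_vcpu(struct vcpu *v)
{
    struct cpu_user_regs regs;
    unsigned long crs[8];

    /* Register file plus CR0/CR3/CR4 in one call, as the
     * show_registers() hunks above now do. */
    hvm_store_cpu_guest_regs(v, &regs, crs);
    printk("guest cr0=%lx cr3=%lx cr4=%lx\n", crs[0], crs[3], crs[4]);

    /* Pass NULL when the control registers are not wanted -- the
     * handle_mmio() style of call. */
    hvm_store_cpu_guest_regs(v, &regs, NULL);
}
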
    18.1 --- a/xen/include/xen/serial.h	Thu Mar 23 17:27:48 2006 +0100
    18.2 +++ b/xen/include/xen/serial.h	Thu Mar 23 17:37:37 2006 +0100
    18.3 @@ -42,7 +42,7 @@ struct serial_port {
    18.4      char                rxbuf[SERIAL_RXBUFSZ];
    18.5      unsigned int        rxbufp, rxbufc;
    18.6      /* Serial I/O is concurrency-safe. */
    18.7 -    spinlock_t          lock;
    18.8 +    spinlock_t          rx_lock, tx_lock;
    18.9  };
   18.10  
   18.11  struct uart_driver {