ia64/linux-2.6.18-xen.hg

changeset 720:61d1f2810617

merge with linux-2.6.18-xen.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Nov 04 12:43:37 2008 +0900 (2008-11-04)
parents 45c3a3dfa5b5 2fb13b8cbe13
children 6591b4869889
files
line diff
     1.1 --- a/arch/i386/kernel/irq-xen.c	Fri Oct 24 11:22:02 2008 +0900
     1.2 +++ b/arch/i386/kernel/irq-xen.c	Tue Nov 04 12:43:37 2008 +0900
     1.3 @@ -66,7 +66,7 @@ fastcall unsigned int do_IRQ(struct pt_r
     1.4  		BUG();
     1.5  	}
     1.6  
     1.7 -	irq_enter();
     1.8 +	/*irq_enter();*/
     1.9  #ifdef CONFIG_DEBUG_STACKOVERFLOW
    1.10  	/* Debugging check for stack overflow: is there less than 1KB free? */
    1.11  	{
    1.12 @@ -121,7 +121,7 @@ fastcall unsigned int do_IRQ(struct pt_r
    1.13  #endif
    1.14  		__do_IRQ(irq, regs);
    1.15  
    1.16 -	irq_exit();
    1.17 +	/*irq_exit();*/
    1.18  
    1.19  	return 1;
    1.20  }
     2.1 --- a/arch/i386/kernel/pci-dma-xen.c	Fri Oct 24 11:22:02 2008 +0900
     2.2 +++ b/arch/i386/kernel/pci-dma-xen.c	Tue Nov 04 12:43:37 2008 +0900
     2.3 @@ -97,17 +97,11 @@ static int check_pages_physically_contig
     2.4  
     2.5  int range_straddles_page_boundary(paddr_t p, size_t size)
     2.6  {
     2.7 -	extern unsigned long *contiguous_bitmap;
     2.8  	unsigned long pfn = p >> PAGE_SHIFT;
     2.9  	unsigned int offset = p & ~PAGE_MASK;
    2.10  
    2.11 -	if (offset + size <= PAGE_SIZE)
    2.12 -		return 0;
    2.13 -	if (test_bit(pfn, contiguous_bitmap))
    2.14 -		return 0;
    2.15 -	if (check_pages_physically_contiguous(pfn, offset, size))
    2.16 -		return 0;
    2.17 -	return 1;
    2.18 +	return ((offset + size > PAGE_SIZE) &&
    2.19 +		!check_pages_physically_contiguous(pfn, offset, size));
    2.20  }
    2.21  
    2.22  int
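
The new i386 range_straddles_page_boundary() above drops both the contiguous_bitmap lookup and the early-return ladder; the whole decision collapses into one expression. A minimal user-space sketch of that logic follows (the fixed 4 KiB page constants and the stubbed contiguity check are assumptions standing in for the kernel's versions, not part of the changeset):

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Assumption: stand-in for check_pages_physically_contiguous(). */
    static int pages_physically_contiguous(unsigned long pfn,
                                           unsigned int offset, size_t size)
    {
            (void)pfn; (void)offset; (void)size;
            return 0;   /* pretend the machine frames are not contiguous */
    }

    static int straddles_page_boundary(unsigned long long p, size_t size)
    {
            unsigned long pfn    = p >> PAGE_SHIFT;
            unsigned int  offset = p & ~PAGE_MASK;

            /* e.g. p = 0x1ff0, size = 0x20: offset + size = 0x1010 > 0x1000,
             * so the result now rests solely on the physical-contiguity test;
             * the old contiguous_bitmap shortcut is gone. */
            return (offset + size > PAGE_SIZE) &&
                   !pages_physically_contiguous(pfn, offset, size);
    }
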
     3.1 --- a/arch/i386/mm/highmem-xen.c	Fri Oct 24 11:22:02 2008 +0900
     3.2 +++ b/arch/i386/mm/highmem-xen.c	Tue Nov 04 12:43:37 2008 +0900
     3.3 @@ -128,9 +128,56 @@ struct page *kmap_atomic_to_page(void *p
     3.4  	return pte_page(*pte);
     3.5  }
     3.6  
     3.7 +void clear_highpage(struct page *page)
     3.8 +{
     3.9 +	void *kaddr;
    3.10 +
    3.11 +	if (likely(xen_feature(XENFEAT_highmem_assist))
    3.12 +	    && PageHighMem(page)) {
    3.13 +		struct mmuext_op meo;
    3.14 +
    3.15 +		meo.cmd = MMUEXT_CLEAR_PAGE;
    3.16 +		meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
    3.17 +		if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
    3.18 +			return;
    3.19 +	}
    3.20 +
    3.21 +	kaddr = kmap_atomic(page, KM_USER0);
    3.22 +	clear_page(kaddr);
    3.23 +	kunmap_atomic(kaddr, KM_USER0);
    3.24 +}
    3.25 +
    3.26 +void copy_highpage(struct page *to, struct page *from)
    3.27 +{
    3.28 +	void *vfrom, *vto;
    3.29 +
    3.30 +	if (likely(xen_feature(XENFEAT_highmem_assist))
    3.31 +	    && (PageHighMem(from) || PageHighMem(to))) {
    3.32 +		unsigned long from_pfn = page_to_pfn(from);
    3.33 +		unsigned long to_pfn = page_to_pfn(to);
    3.34 +		struct mmuext_op meo;
    3.35 +
    3.36 +		meo.cmd = MMUEXT_COPY_PAGE;
    3.37 +		meo.arg1.mfn = pfn_to_mfn(to_pfn);
    3.38 +		meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
    3.39 +		if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
    3.40 +		    && mfn_to_pfn(meo.arg1.mfn) == to_pfn
    3.41 +		    && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
    3.42 +			return;
    3.43 +	}
    3.44 +
    3.45 +	vfrom = kmap_atomic(from, KM_USER0);
    3.46 +	vto = kmap_atomic(to, KM_USER1);
    3.47 +	copy_page(vto, vfrom);
    3.48 +	kunmap_atomic(vfrom, KM_USER0);
    3.49 +	kunmap_atomic(vto, KM_USER1);
    3.50 +}
    3.51 +
    3.52  EXPORT_SYMBOL(kmap);
    3.53  EXPORT_SYMBOL(kunmap);
    3.54  EXPORT_SYMBOL(kmap_atomic);
    3.55  EXPORT_SYMBOL(kmap_atomic_pte);
    3.56  EXPORT_SYMBOL(kunmap_atomic);
    3.57  EXPORT_SYMBOL(kmap_atomic_to_page);
    3.58 +EXPORT_SYMBOL(clear_highpage);
    3.59 +EXPORT_SYMBOL(copy_highpage);
     4.1 --- a/arch/i386/mm/hypervisor.c	Fri Oct 24 11:22:02 2008 +0900
     4.2 +++ b/arch/i386/mm/hypervisor.c	Tue Nov 04 12:43:37 2008 +0900
     4.3 @@ -190,54 +190,6 @@ void xen_set_ldt(const void *ptr, unsign
     4.4  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
     4.5  }
     4.6  
     4.7 -/*
     4.8 - * Bitmap is indexed by page number. If bit is set, the page is part of a
     4.9 - * xen_create_contiguous_region() area of memory.
    4.10 - */
    4.11 -unsigned long *contiguous_bitmap;
    4.12 -
    4.13 -static void contiguous_bitmap_set(
    4.14 -	unsigned long first_page, unsigned long nr_pages)
    4.15 -{
    4.16 -	unsigned long start_off, end_off, curr_idx, end_idx;
    4.17 -
    4.18 -	curr_idx  = first_page / BITS_PER_LONG;
    4.19 -	start_off = first_page & (BITS_PER_LONG-1);
    4.20 -	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
    4.21 -	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
    4.22 -
    4.23 -	if (curr_idx == end_idx) {
    4.24 -		contiguous_bitmap[curr_idx] |=
    4.25 -			((1UL<<end_off)-1) & -(1UL<<start_off);
    4.26 -	} else {
    4.27 -		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
    4.28 -		while ( ++curr_idx < end_idx )
    4.29 -			contiguous_bitmap[curr_idx] = ~0UL;
    4.30 -		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
    4.31 -	}
    4.32 -}
    4.33 -
    4.34 -static void contiguous_bitmap_clear(
    4.35 -	unsigned long first_page, unsigned long nr_pages)
    4.36 -{
    4.37 -	unsigned long start_off, end_off, curr_idx, end_idx;
    4.38 -
    4.39 -	curr_idx  = first_page / BITS_PER_LONG;
    4.40 -	start_off = first_page & (BITS_PER_LONG-1);
    4.41 -	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
    4.42 -	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
    4.43 -
    4.44 -	if (curr_idx == end_idx) {
    4.45 -		contiguous_bitmap[curr_idx] &=
    4.46 -			-(1UL<<end_off) | ((1UL<<start_off)-1);
    4.47 -	} else {
    4.48 -		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
    4.49 -		while ( ++curr_idx != end_idx )
    4.50 -			contiguous_bitmap[curr_idx] = 0;
    4.51 -		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
    4.52 -	}
    4.53 -}
    4.54 -
    4.55  /* Protected by balloon_lock. */
    4.56  #define MAX_CONTIG_ORDER 9 /* 2MB */
    4.57  static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
    4.58 @@ -334,10 +286,6 @@ int xen_create_contiguous_region(
    4.59  	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
    4.60  		BUG();
    4.61  
    4.62 -	if (success)
    4.63 -		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
    4.64 -				      1UL << order);
    4.65 -
    4.66  	balloon_unlock(flags);
    4.67  
    4.68  	return success ? 0 : -ENOMEM;
    4.69 @@ -363,8 +311,7 @@ void xen_destroy_contiguous_region(unsig
    4.70  		}
    4.71  	};
    4.72  
    4.73 -	if (xen_feature(XENFEAT_auto_translated_physmap) ||
    4.74 -	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
    4.75 +	if (xen_feature(XENFEAT_auto_translated_physmap))
    4.76  		return;
    4.77  
    4.78  	if (unlikely(order > MAX_CONTIG_ORDER))
    4.79 @@ -377,8 +324,6 @@ void xen_destroy_contiguous_region(unsig
    4.80  
    4.81  	balloon_lock(flags);
    4.82  
    4.83 -	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
    4.84 -
    4.85  	/* 1. Find start MFN of contiguous extent. */
    4.86  	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
    4.87  
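
For reference, the mask arithmetic in the contiguous_bitmap_set()/_clear() helpers removed above is easier to see with concrete numbers (a worked example, assuming BITS_PER_LONG = 64):

    /* first_page = 3, nr_pages = 4, BITS_PER_LONG = 64:
     *   curr_idx = 0, start_off = 3, end_idx = 0, end_off = 7
     *   -(1UL << start_off)   = ...11111000   (bits 3 and above)
     *   (1UL << end_off) - 1  = ...01111111   (bits 0..6)
     *   ANDed together        = ...01111000   (bits 3..6)
     * so exactly nr_pages bits (pages 3..6) are set in a single word;
     * the multi-word branch handles ranges that cross word boundaries. */
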
     5.1 --- a/arch/i386/mm/init-xen.c	Fri Oct 24 11:22:02 2008 +0900
     5.2 +++ b/arch/i386/mm/init-xen.c	Tue Nov 04 12:43:37 2008 +0900
     5.3 @@ -47,8 +47,6 @@
     5.4  #include <asm/hypervisor.h>
     5.5  #include <asm/swiotlb.h>
     5.6  
     5.7 -extern unsigned long *contiguous_bitmap;
     5.8 -
     5.9  unsigned int __VMALLOC_RESERVE = 128 << 20;
    5.10  
    5.11  DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
    5.12 @@ -620,11 +618,6 @@ void __init mem_init(void)
    5.13  	int bad_ppro;
    5.14  	unsigned long pfn;
    5.15  
    5.16 -	contiguous_bitmap = alloc_bootmem_low_pages(
    5.17 -		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
    5.18 -	BUG_ON(!contiguous_bitmap);
    5.19 -	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
    5.20 -
    5.21  #if defined(CONFIG_SWIOTLB)
    5.22  	swiotlb_init();	
    5.23  #endif
     6.1 --- a/arch/ia64/kernel/setup.c	Fri Oct 24 11:22:02 2008 +0900
     6.2 +++ b/arch/ia64/kernel/setup.c	Tue Nov 04 12:43:37 2008 +0900
     6.3 @@ -684,9 +684,6 @@ setup_arch (char **cmdline_p)
     6.4  	}
     6.5  #endif
     6.6  	paging_init();
     6.7 -#ifdef CONFIG_XEN
     6.8 -	xen_contiguous_bitmap_init(max_pfn);
     6.9 -#endif
    6.10  }
    6.11  
    6.12  /*
     7.1 --- a/arch/ia64/xen/hypervisor.c	Fri Oct 24 11:22:02 2008 +0900
     7.2 +++ b/arch/ia64/xen/hypervisor.c	Tue Nov 04 12:43:37 2008 +0900
     7.3 @@ -80,154 +80,6 @@ xen_cpu_init(void)
     7.4  }
     7.5  
     7.6  /*
     7.7 - *XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
     7.8 - * move those to lib/contiguous_bitmap?
     7.9 - *XXX discontigmem/sparsemem
    7.10 - */
    7.11 -
    7.12 -/*
    7.13 - * Bitmap is indexed by page number. If bit is set, the page is part of a
    7.14 - * xen_create_contiguous_region() area of memory.
    7.15 - */
    7.16 -unsigned long *contiguous_bitmap __read_mostly;
    7.17 -
    7.18 -#ifdef CONFIG_VIRTUAL_MEM_MAP
    7.19 -/* Following logic is stolen from create_mem_map_table() for virtual memmap */
    7.20 -static int
    7.21 -create_contiguous_bitmap(u64 start, u64 end, void *arg)
    7.22 -{
    7.23 -	unsigned long address, start_page, end_page;
    7.24 -	unsigned long bitmap_start, bitmap_end;
    7.25 -	unsigned char *bitmap;
    7.26 -	int node;
    7.27 -	pgd_t *pgd;
    7.28 -	pud_t *pud;
    7.29 -	pmd_t *pmd;
    7.30 -	pte_t *pte;
    7.31 -
    7.32 -	bitmap_start = (unsigned long)contiguous_bitmap +
    7.33 -		       ((__pa(start) >> PAGE_SHIFT) >> 3);
    7.34 -	bitmap_end = (unsigned long)contiguous_bitmap +
    7.35 -		     (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
    7.36 -
    7.37 -	start_page = bitmap_start & PAGE_MASK;
    7.38 -	end_page = PAGE_ALIGN(bitmap_end);
    7.39 -	node = paddr_to_nid(__pa(start));
    7.40 -
    7.41 -	bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
    7.42 -					  end_page - start_page);
    7.43 -	BUG_ON(!bitmap);
    7.44 -	memset(bitmap, 0, end_page - start_page);
    7.45 -
    7.46 -	for (address = start_page; address < end_page; address += PAGE_SIZE) {
    7.47 -		pgd = pgd_offset_k(address);
    7.48 -		if (pgd_none(*pgd))
    7.49 -			pgd_populate(&init_mm, pgd,
    7.50 -				     alloc_bootmem_pages_node(NODE_DATA(node),
    7.51 -							      PAGE_SIZE));
    7.52 -		pud = pud_offset(pgd, address);
    7.53 -
    7.54 -		if (pud_none(*pud))
    7.55 -			pud_populate(&init_mm, pud,
    7.56 -				     alloc_bootmem_pages_node(NODE_DATA(node),
    7.57 -							      PAGE_SIZE));
    7.58 -		pmd = pmd_offset(pud, address);
    7.59 -
    7.60 -		if (pmd_none(*pmd))
    7.61 -			pmd_populate_kernel(&init_mm, pmd,
    7.62 -					    alloc_bootmem_pages_node
    7.63 -					    (NODE_DATA(node), PAGE_SIZE));
    7.64 -		pte = pte_offset_kernel(pmd, address);
    7.65 -
    7.66 -		if (pte_none(*pte))
    7.67 -			set_pte(pte,
    7.68 -				pfn_pte(__pa(bitmap + (address - start_page))
    7.69 -					>> PAGE_SHIFT, PAGE_KERNEL));
    7.70 -	}
    7.71 -	return 0;
    7.72 -}
    7.73 -#endif
    7.74 -
    7.75 -static void
    7.76 -__contiguous_bitmap_init(unsigned long size)
    7.77 -{
    7.78 -	contiguous_bitmap = alloc_bootmem_pages(size);
    7.79 -	BUG_ON(!contiguous_bitmap);
    7.80 -	memset(contiguous_bitmap, 0, size);
    7.81 -}
    7.82 -
    7.83 -void
    7.84 -xen_contiguous_bitmap_init(unsigned long end_pfn)
    7.85 -{
    7.86 -	unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
    7.87 -#ifndef CONFIG_VIRTUAL_MEM_MAP
    7.88 -	__contiguous_bitmap_init(size);
    7.89 -#else
    7.90 -	unsigned long max_gap = 0;
    7.91 -
    7.92 -	efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
    7.93 -	if (max_gap < LARGE_GAP) {
    7.94 -		__contiguous_bitmap_init(size);
    7.95 -	} else {
    7.96 -		unsigned long map_size = PAGE_ALIGN(size);
    7.97 -		vmalloc_end -= map_size;
    7.98 -		contiguous_bitmap = (unsigned long*)vmalloc_end;
    7.99 -		efi_memmap_walk(create_contiguous_bitmap, NULL);
   7.100 -	}
   7.101 -#endif
   7.102 -}
   7.103 -
   7.104 -#if 0
   7.105 -int
   7.106 -contiguous_bitmap_test(void* p)
   7.107 -{
   7.108 -	return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
   7.109 -}
   7.110 -#endif
   7.111 -
   7.112 -static void contiguous_bitmap_set(
   7.113 -	unsigned long first_page, unsigned long nr_pages)
   7.114 -{
   7.115 -	unsigned long start_off, end_off, curr_idx, end_idx;
   7.116 -
   7.117 -	curr_idx  = first_page / BITS_PER_LONG;
   7.118 -	start_off = first_page & (BITS_PER_LONG-1);
   7.119 -	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
   7.120 -	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
   7.121 -
   7.122 -	if (curr_idx == end_idx) {
   7.123 -		contiguous_bitmap[curr_idx] |=
   7.124 -			((1UL<<end_off)-1) & -(1UL<<start_off);
   7.125 -	} else {
   7.126 -		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
   7.127 -		while ( ++curr_idx < end_idx )
   7.128 -			contiguous_bitmap[curr_idx] = ~0UL;
   7.129 -		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
   7.130 -	}
   7.131 -}
   7.132 -
   7.133 -static void contiguous_bitmap_clear(
   7.134 -	unsigned long first_page, unsigned long nr_pages)
   7.135 -{
   7.136 -	unsigned long start_off, end_off, curr_idx, end_idx;
   7.137 -
   7.138 -	curr_idx  = first_page / BITS_PER_LONG;
   7.139 -	start_off = first_page & (BITS_PER_LONG-1);
   7.140 -	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
   7.141 -	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
   7.142 -
   7.143 -	if (curr_idx == end_idx) {
   7.144 -		contiguous_bitmap[curr_idx] &=
   7.145 -			-(1UL<<end_off) | ((1UL<<start_off)-1);
   7.146 -	} else {
   7.147 -		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
   7.148 -		while ( ++curr_idx != end_idx )
   7.149 -			contiguous_bitmap[curr_idx] = 0;
   7.150 -		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
   7.151 -	}
   7.152 -}
   7.153 -
   7.154 -/*
   7.155   * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
   7.156   * are based on i386 xen_create_contiguous_region(),
   7.157   * xen_destroy_contiguous_region()
   7.158 @@ -303,8 +155,6 @@ int
   7.159  		} else
   7.160  			success = 1;
   7.161  	}
   7.162 -	if (success)
   7.163 -		contiguous_bitmap_set(start_gpfn, num_gpfn);
   7.164  #if 0
   7.165  	if (success) {
   7.166  		unsigned long mfn;
   7.167 @@ -363,9 +213,6 @@ void
   7.168  	};
   7.169  	
   7.170  
   7.171 -	if (!test_bit(start_gpfn, contiguous_bitmap))
   7.172 -		return;
   7.173 -
   7.174  	if (unlikely(order > MAX_CONTIG_ORDER))
   7.175  		return;
   7.176  
   7.177 @@ -376,8 +223,6 @@ void
   7.178  
   7.179  	balloon_lock(flags);
   7.180  
   7.181 -	contiguous_bitmap_clear(start_gpfn, num_gpfn);
   7.182 -
   7.183  	/* Do the exchange for non-contiguous MFNs. */
   7.184  	in_frame = start_gpfn;
   7.185  	for (i = 0; i < num_gpfn; i++)
     8.1 --- a/arch/ia64/xen/xen_dma.c	Fri Oct 24 11:22:02 2008 +0900
     8.2 +++ b/arch/ia64/xen/xen_dma.c	Tue Nov 04 12:43:37 2008 +0900
     8.3 @@ -57,7 +57,6 @@ static int check_pages_physically_contig
     8.4  
     8.5  int range_straddles_page_boundary(paddr_t p, size_t size)
     8.6  {
     8.7 -	extern unsigned long *contiguous_bitmap;
     8.8  	unsigned long pfn = p >> PAGE_SHIFT;
     8.9  	unsigned int offset = p & ~PAGE_MASK;
    8.10  
    8.11 @@ -66,8 +65,6 @@ int range_straddles_page_boundary(paddr_
    8.12  
    8.13  	if (offset + size <= PAGE_SIZE)
    8.14  		return 0;
    8.15 -	if (test_bit(pfn, contiguous_bitmap))
    8.16 -		return 0;
    8.17  	if (check_pages_physically_contiguous(pfn, offset, size))
    8.18  		return 0;
    8.19  	return 1;
     9.1 --- a/arch/x86_64/kernel/entry-xen.S	Fri Oct 24 11:22:02 2008 +0900
     9.2 +++ b/arch/x86_64/kernel/entry-xen.S	Tue Nov 04 12:43:37 2008 +0900
     9.3 @@ -117,9 +117,9 @@ NMI_MASK = 0x80000000
     9.4  	.macro	CFI_DEFAULT_STACK start=1,adj=0
     9.5  	.if \start
     9.6  	CFI_STARTPROC	simple
     9.7 -	CFI_DEF_CFA	rsp,SS+8-(\adj*ARGOFFSET)
     9.8 +	CFI_DEF_CFA	rsp,SS+8 - \adj*ARGOFFSET
     9.9  	.else
    9.10 -	CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
    9.11 +	CFI_DEF_CFA_OFFSET SS+8 - \adj*ARGOFFSET
    9.12  	.endif
    9.13  	.if \adj == 0
    9.14  	CFI_REL_OFFSET	r15,R15
    9.15 @@ -129,20 +129,20 @@ NMI_MASK = 0x80000000
    9.16  	CFI_REL_OFFSET	rbp,RBP
    9.17  	CFI_REL_OFFSET	rbx,RBX
    9.18  	.endif
    9.19 -	CFI_REL_OFFSET	r11,R11
    9.20 -	CFI_REL_OFFSET	r10,R10
    9.21 -	CFI_REL_OFFSET	r9,R9
    9.22 -	CFI_REL_OFFSET	r8,R8
    9.23 -	CFI_REL_OFFSET	rax,RAX
    9.24 -	CFI_REL_OFFSET	rcx,RCX
    9.25 -	CFI_REL_OFFSET	rdx,RDX
    9.26 -	CFI_REL_OFFSET	rsi,RSI
    9.27 -	CFI_REL_OFFSET	rdi,RDI
    9.28 -	CFI_REL_OFFSET	rip,RIP
    9.29 -	/*CFI_REL_OFFSET	cs,CS*/
    9.30 -	/*CFI_REL_OFFSET	rflags,EFLAGS*/
    9.31 -	CFI_REL_OFFSET	rsp,RSP
    9.32 -	/*CFI_REL_OFFSET	ss,SS*/
    9.33 +	CFI_REL_OFFSET	r11,R11 - \adj*ARGOFFSET
    9.34 +	CFI_REL_OFFSET	r10,R10 - \adj*ARGOFFSET
    9.35 +	CFI_REL_OFFSET	r9,R9 - \adj*ARGOFFSET
    9.36 +	CFI_REL_OFFSET	r8,R8 - \adj*ARGOFFSET
    9.37 +	CFI_REL_OFFSET	rax,RAX - \adj*ARGOFFSET
    9.38 +	CFI_REL_OFFSET	rcx,RCX - \adj*ARGOFFSET
    9.39 +	CFI_REL_OFFSET	rdx,RDX - \adj*ARGOFFSET
    9.40 +	CFI_REL_OFFSET	rsi,RSI - \adj*ARGOFFSET
    9.41 +	CFI_REL_OFFSET	rdi,RDI - \adj*ARGOFFSET
    9.42 +	CFI_REL_OFFSET	rip,RIP - \adj*ARGOFFSET
    9.43 +	/*CFI_REL_OFFSET	cs,CS - \adj*ARGOFFSET*/
    9.44 +	/*CFI_REL_OFFSET	rflags,EFLAGS - \adj*ARGOFFSET*/
    9.45 +	CFI_REL_OFFSET	rsp,RSP - \adj*ARGOFFSET
    9.46 +	/*CFI_REL_OFFSET	ss,SS - \adj*ARGOFFSET*/
    9.47  	.endm
    9.48  
    9.49          /*
    10.1 --- a/arch/x86_64/kernel/irq-xen.c	Fri Oct 24 11:22:02 2008 +0900
    10.2 +++ b/arch/x86_64/kernel/irq-xen.c	Tue Nov 04 12:43:37 2008 +0900
    10.3 @@ -124,13 +124,13 @@ asmlinkage unsigned int do_IRQ(struct pt
    10.4  		BUG();
    10.5  	}
    10.6  
    10.7 -	exit_idle();
    10.8 -	irq_enter();
    10.9 +	/*exit_idle();*/
   10.10 +	/*irq_enter();*/
   10.11  #ifdef CONFIG_DEBUG_STACKOVERFLOW
   10.12  	stack_overflow_check(regs);
   10.13  #endif
   10.14  	__do_IRQ(irq, regs);
   10.15 -	irq_exit();
   10.16 +	/*irq_exit();*/
   10.17  
   10.18  	return 1;
   10.19  }
    11.1 --- a/arch/x86_64/mm/init-xen.c	Fri Oct 24 11:22:02 2008 +0900
    11.2 +++ b/arch/x86_64/mm/init-xen.c	Tue Nov 04 12:43:37 2008 +0900
    11.3 @@ -61,8 +61,6 @@ EXPORT_SYMBOL(__kernel_page_user);
    11.4  
    11.5  int after_bootmem;
    11.6  
    11.7 -extern unsigned long *contiguous_bitmap;
    11.8 -
    11.9  static unsigned long dma_reserve __initdata;
   11.10  
   11.11  DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
   11.12 @@ -972,11 +970,6 @@ void __init mem_init(void)
   11.13  	long codesize, reservedpages, datasize, initsize;
   11.14  	unsigned long pfn;
   11.15  
   11.16 -	contiguous_bitmap = alloc_bootmem_low_pages(
   11.17 -		(end_pfn + 2*BITS_PER_LONG) >> 3);
   11.18 -	BUG_ON(!contiguous_bitmap);
   11.19 -	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
   11.20 -
   11.21  	pci_iommu_alloc();
   11.22  
   11.23  	/* How many end-of-memory variables you have, grandma! */
    12.1 --- a/drivers/acpi/processor_core.c	Fri Oct 24 11:22:02 2008 +0900
    12.2 +++ b/drivers/acpi/processor_core.c	Tue Nov 04 12:43:37 2008 +0900
    12.3 @@ -513,8 +513,17 @@ static int acpi_processor_get_info(struc
    12.4  #if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
    12.5  	acpi_processor_ppc_has_changed(pr);
    12.6  #endif
    12.7 -	acpi_processor_get_throttling_info(pr);
    12.8 -	acpi_processor_get_limit_info(pr);
    12.9 +
    12.10 +	/*
    12.11 +	 * pr->id may be -1 when processor_cntl_external is enabled.
    12.12 +	 * The throttling and thermal modules don't support this case.
    12.13 +	 * For now Tx only works when the dom0 vcpu count equals the pcpu
    12.14 +	 * count, as we give control to dom0.
    12.15 +	 */
   12.16 +	if (pr->id != -1) {
   12.17 +		acpi_processor_get_throttling_info(pr);
   12.18 +		acpi_processor_get_limit_info(pr);
   12.19 +	}
   12.20  
   12.21  	return 0;
   12.22  }
    13.1 --- a/drivers/acpi/processor_idle.c	Fri Oct 24 11:22:02 2008 +0900
    13.2 +++ b/drivers/acpi/processor_idle.c	Tue Nov 04 12:43:37 2008 +0900
    13.3 @@ -654,7 +654,7 @@ static int acpi_processor_get_power_info
    13.4  	if (nocst)
    13.5  		return -ENODEV;
    13.6  
    13.7 -	current_count = 1;
    13.8 +	current_count = 0;
    13.9  
   13.10  	/* Zero initialize C2 onwards and prepare for fresh CST lookup */
   13.11  	for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++)
   13.12 @@ -728,15 +728,19 @@ static int acpi_processor_get_power_info
   13.13  
   13.14  		cx.type = obj->integer.value;
   13.15  
   13.16 +		/*
   13.17 +		 * Some buggy BIOSes won't list C1 in _CST -
   13.18 +		 * Let acpi_processor_get_power_info_default() handle them later
   13.19 +		 */
   13.20 +		if (i == 1 && cx.type != ACPI_STATE_C1)
   13.21 +			current_count++;
   13.22 +
   13.23  		/* Following check doesn't apply to external control case */
   13.24  		if (!processor_pm_external() &&
   13.25  		    (cx.type != ACPI_STATE_C1) &&
   13.26  		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
   13.27  			continue;
   13.28  
   13.29 -		if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3))
   13.30 -			continue;
   13.31 -
   13.32  		obj = (union acpi_object *)&(element->package.elements[2]);
   13.33  		if (obj->type != ACPI_TYPE_INTEGER)
   13.34  			continue;
    14.1 --- a/drivers/pci/bus.c	Fri Oct 24 11:22:02 2008 +0900
    14.2 +++ b/drivers/pci/bus.c	Tue Nov 04 12:43:37 2008 +0900
    14.3 @@ -17,8 +17,6 @@
    14.4  
    14.5  #include "pci.h"
    14.6  
    14.7 -extern int pci_mem_align;
    14.8 -
    14.9  /**
   14.10   * pci_bus_alloc_resource - allocate a resource from a parent bus
   14.11   * @bus: PCI bus
   14.12 @@ -46,11 +44,6 @@ pci_bus_alloc_resource(struct pci_bus *b
   14.13  
   14.14  	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
   14.15  
   14.16 -	/* If the boot parameter 'pci-mem-align' was specified then we need to 
   14.17 -	   align the memory addresses, at page size alignment. */
   14.18 -	if (pci_mem_align && (align < (PAGE_SIZE-1)))
   14.19 -		align = PAGE_SIZE - 1;
   14.20 -
   14.21  	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
   14.22  		struct resource *r = bus->resource[i];
   14.23  		if (!r)
    15.1 --- a/drivers/pci/quirks.c	Fri Oct 24 11:22:02 2008 +0900
    15.2 +++ b/drivers/pci/quirks.c	Tue Nov 04 12:43:37 2008 +0900
    15.3 @@ -23,25 +23,17 @@
    15.4  #include <linux/acpi.h>
    15.5  #include "pci.h"
    15.6  
    15.7 -/* A global flag which signals if we should page-align PCI mem windows. */
    15.8 -int pci_mem_align = 0;
    15.9  
   15.10 -static int __init set_pci_mem_align(char *str)
   15.11 -{
   15.12 -	pci_mem_align = 1;
   15.13 -	return 1;
   15.14 -}
   15.15 -__setup("pci-mem-align", set_pci_mem_align);
   15.16 -
   15.17 -
   15.18 -/* This quirk function enables us to force all memory resources which are 
   15.19 - * assigned to PCI devices, to be page-aligned.
    15.20 +/*
    15.21 + * This quirk function disables the device and releases the resources
    15.22 + * of any device specified by the kernel boot parameter 'reassigndev'.
    15.23 + * Later on, the kernel will assign page-aligned memory resources back
    15.24 + * to that device.
    15.25 + */
   15.26 -static void __devinit quirk_align_mem_resources(struct pci_dev *dev)
   15.27 +static void __devinit quirk_release_resources(struct pci_dev *dev)
   15.28  {
   15.29  	int i;
   15.30  	struct resource *r;
   15.31 -	resource_size_t old_start;
   15.32  
   15.33  	if (is_reassigndev(dev)) {
   15.34  		if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
   15.35 @@ -76,21 +68,8 @@ static void __devinit quirk_align_mem_re
   15.36  		}
   15.37  		return;
   15.38  	}
   15.39 -
   15.40 -	if (!pci_mem_align)
   15.41 -		return;
   15.42 -
   15.43 -	for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
   15.44 -		r = &dev->resource[i];
   15.45 -		if ((r == NULL) || !(r->flags & IORESOURCE_MEM))
   15.46 -			continue;
   15.47 -
   15.48 -		old_start = r->start;
   15.49 -		r->start = (r->start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
   15.50 -		r->end = r->end - (old_start - r->start);
   15.51 -	}
   15.52  }
   15.53 -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_align_mem_resources);
   15.54 +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_release_resources);
   15.55  
   15.56  /* The Mellanox Tavor device gives false positive parity errors
   15.57   * Mark this device with a broken_parity_status, to allow
    16.1 --- a/drivers/xen/core/evtchn.c	Fri Oct 24 11:22:02 2008 +0900
    16.2 +++ b/drivers/xen/core/evtchn.c	Tue Nov 04 12:43:37 2008 +0900
    16.3 @@ -246,6 +246,8 @@ asmlinkage void evtchn_do_upcall(struct 
    16.4  	shared_info_t      *s = HYPERVISOR_shared_info;
    16.5  	vcpu_info_t        *vcpu_info = &s->vcpu_info[cpu];
    16.6  
    16.7 +	exit_idle();
    16.8 +	irq_enter();
    16.9  
   16.10  	do {
   16.11  		/* Avoid a callback storm when we reenable delivery. */
   16.12 @@ -253,7 +255,7 @@ asmlinkage void evtchn_do_upcall(struct 
   16.13  
   16.14  		/* Nested invocations bail immediately. */
   16.15  		if (unlikely(per_cpu(upcall_count, cpu)++))
   16.16 -			return;
   16.17 +			break;
   16.18  
   16.19  #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
   16.20  		/* Clear master flag /before/ clearing selector flag. */
   16.21 @@ -293,10 +295,8 @@ asmlinkage void evtchn_do_upcall(struct 
   16.22  				port = (l1i * BITS_PER_LONG) + l2i;
   16.23  				if ((irq = evtchn_to_irq[port]) != -1)
   16.24  					do_IRQ(irq, regs);
   16.25 -				else {
   16.26 -					exit_idle();
   16.27 +				else
   16.28  					evtchn_device_upcall(port);
   16.29 -				}
   16.30  
   16.31  				/* if this is the final port processed, we'll pick up here+1 next time */
   16.32  				per_cpu(last_processed_l1i, cpu) = l1i;
   16.33 @@ -314,6 +314,8 @@ asmlinkage void evtchn_do_upcall(struct 
   16.34  		count = per_cpu(upcall_count, cpu);
   16.35  		per_cpu(upcall_count, cpu) = 0;
   16.36  	} while (unlikely(count != 1));
   16.37 +
   16.38 +	irq_exit();
   16.39  }
   16.40  
   16.41  static int find_unbound_irq(void)
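
Taken together with the do_IRQ() hunks in arch/i386/kernel/irq-xen.c, arch/x86_64/kernel/irq-xen.c and include/asm-ia64/hypercall.h, this moves exit_idle()/irq_enter()/irq_exit() out of the per-interrupt path and wraps them once around the whole event-channel upcall (note the return-to-break change, so the final irq_exit() is never skipped). A toy user-space model of the new nesting, illustration only:

    #include <stdio.h>

    static int hardirq_depth;                  /* stands in for the preempt count */

    static void irq_enter(void) { hardirq_depth++; }
    static void irq_exit(void)  { hardirq_depth--; }

    static void do_IRQ(int irq)                /* now just __do_IRQ underneath */
    {
            printf("irq %d handled at depth %d\n", irq, hardirq_depth);
    }

    static void evtchn_do_upcall(void)
    {
            irq_enter();                       /* one enter for the whole batch */
            for (int irq = 0; irq < 3; irq++)  /* pretend three events are pending */
                    do_IRQ(irq);
            irq_exit();                        /* single matching exit */
    }

    int main(void)
    {
            evtchn_do_upcall();
            return 0;
    }
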
    17.1 --- a/drivers/xen/core/gnttab.c	Fri Oct 24 11:22:02 2008 +0900
    17.2 +++ b/drivers/xen/core/gnttab.c	Tue Nov 04 12:43:37 2008 +0900
    17.3 @@ -112,6 +112,7 @@ static void do_free_callbacks(void)
    17.4  		next = callback->next;
    17.5  		if (gnttab_free_count >= callback->count) {
    17.6  			callback->next = NULL;
    17.7 +			callback->queued = 0;
    17.8  			callback->fn(callback->arg);
    17.9  		} else {
   17.10  			callback->next = gnttab_free_callback_list;
   17.11 @@ -343,11 +344,12 @@ void gnttab_request_free_callback(struct
   17.12  {
   17.13  	unsigned long flags;
   17.14  	spin_lock_irqsave(&gnttab_list_lock, flags);
   17.15 -	if (callback->next)
   17.16 +	if (callback->queued)
   17.17  		goto out;
   17.18  	callback->fn = fn;
   17.19  	callback->arg = arg;
   17.20  	callback->count = count;
   17.21 +	callback->queued = 1;
   17.22  	callback->next = gnttab_free_callback_list;
   17.23  	gnttab_free_callback_list = callback;
   17.24  	check_free_callbacks();
   17.25 @@ -365,6 +367,7 @@ void gnttab_cancel_free_callback(struct 
   17.26  	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
   17.27  		if (*pcb == callback) {
   17.28  			*pcb = callback->next;
   17.29 +			callback->queued = 0;
   17.30  			break;
   17.31  		}
   17.32  	}
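
The new queued flag exists because callback->next cannot distinguish "not queued" from "queued at the tail of the list" (the tail's next is NULL either way), so a repeated request could re-link the tail and corrupt the list. A minimal user-space sketch of the guard; the names only mirror the kernel's:

    #include <stdio.h>

    struct free_callback {
            struct free_callback *next;
            int queued;                     /* mirrors the new 'u8 queued' field */
    };

    static struct free_callback *callback_list;

    static void request_callback(struct free_callback *cb)
    {
            if (cb->queued)                 /* old test was 'if (cb->next)', which is
                                             * also false for the entry at the tail */
                    return;
            cb->queued = 1;
            cb->next = callback_list;
            callback_list = cb;
    }

    int main(void)
    {
            struct free_callback a = { 0 }, b = { 0 };

            request_callback(&a);           /* list: a       */
            request_callback(&b);           /* list: b -> a  */
            request_callback(&a);           /* a is the tail (a.next == NULL); the old
                                             * test would re-queue it and create a cycle */
            for (struct free_callback *p = callback_list; p; p = p->next)
                    printf("%s\n", p == &a ? "a" : "b");
            return 0;
    }
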
    18.1 --- a/drivers/xen/core/xen_sysfs.c	Fri Oct 24 11:22:02 2008 +0900
    18.2 +++ b/drivers/xen/core/xen_sysfs.c	Tue Nov 04 12:43:37 2008 +0900
    18.3 @@ -337,6 +337,9 @@ static void xen_properties_destroy(void)
    18.4  
    18.5  #ifdef CONFIG_KEXEC
    18.6  
    18.7 +extern size_t vmcoreinfo_size_xen;
    18.8 +extern unsigned long paddr_vmcoreinfo_xen;
    18.9 +
   18.10  static ssize_t vmcoreinfo_show(struct hyp_sysfs_attr *attr, char *page)
   18.11  {
   18.12  	return sprintf(page, "%lx %zx\n",
    19.1 --- a/include/asm-i386/mach-xen/asm/highmem.h	Fri Oct 24 11:22:02 2008 +0900
    19.2 +++ b/include/asm-i386/mach-xen/asm/highmem.h	Tue Nov 04 12:43:37 2008 +0900
    19.3 @@ -75,6 +75,23 @@ struct page *kmap_atomic_to_page(void *p
    19.4  
    19.5  #define flush_cache_kmaps()	do { } while (0)
    19.6  
    19.7 +void clear_highpage(struct page *);
    19.8 +static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
    19.9 +{
   19.10 +	clear_highpage(page);
   19.11 +}
   19.12 +#define __HAVE_ARCH_CLEAR_HIGHPAGE
   19.13 +#define __HAVE_ARCH_CLEAR_USER_HIGHPAGE
   19.14 +
   19.15 +void copy_highpage(struct page *to, struct page *from);
   19.16 +static inline void copy_user_highpage(struct page *to, struct page *from,
   19.17 +	unsigned long vaddr)
   19.18 +{
   19.19 +	copy_highpage(to, from);
   19.20 +}
   19.21 +#define __HAVE_ARCH_COPY_HIGHPAGE
   19.22 +#define __HAVE_ARCH_COPY_USER_HIGHPAGE
   19.23 +
   19.24  #endif /* __KERNEL__ */
   19.25  
   19.26  #endif /* _ASM_HIGHMEM_H */
    20.1 --- a/include/asm-ia64/hypercall.h	Fri Oct 24 11:22:02 2008 +0900
    20.2 +++ b/include/asm-ia64/hypercall.h	Tue Nov 04 12:43:37 2008 +0900
    20.3 @@ -237,11 +237,7 @@ xencomm_arch_hypercall_opt_feature(struc
    20.4  
    20.5  extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
    20.6  static inline void exit_idle(void) {}
    20.7 -#define do_IRQ(irq, regs) ({			\
    20.8 -	irq_enter();				\
    20.9 -	__do_IRQ((irq), (regs));		\
   20.10 -	irq_exit();				\
   20.11 -})
   20.12 +#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
   20.13  
   20.14  #include <linux/err.h>
   20.15  #ifdef HAVE_XEN_PLATFORM_COMPAT_H
    21.1 --- a/include/asm-ia64/hypervisor.h	Fri Oct 24 11:22:02 2008 +0900
    21.2 +++ b/include/asm-ia64/hypervisor.h	Tue Nov 04 12:43:37 2008 +0900
    21.3 @@ -140,7 +140,6 @@ int privcmd_mmap(struct file * file, str
    21.4  #define	pte_mfn(_x)	pte_pfn(_x)
    21.5  #define phys_to_machine_mapping_valid(_x)	(1)
    21.6  
    21.7 -void xen_contiguous_bitmap_init(unsigned long end_pfn);
    21.8  int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits);
    21.9  static inline int
   21.10  xen_create_contiguous_region(unsigned long vstart,
    22.1 --- a/include/linux/highmem.h	Fri Oct 24 11:22:02 2008 +0900
    22.2 +++ b/include/linux/highmem.h	Tue Nov 04 12:43:37 2008 +0900
    22.3 @@ -50,6 +50,7 @@ static inline void *kmap(struct page *pa
    22.4  
    22.5  #endif /* CONFIG_HIGHMEM */
    22.6  
    22.7 +#ifndef __HAVE_ARCH_CLEAR_USER_HIGHPAGE
    22.8  /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
    22.9  static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
   22.10  {
   22.11 @@ -59,6 +60,7 @@ static inline void clear_user_highpage(s
   22.12  	/* Make sure this page is cleared on other CPU's too before using it */
   22.13  	smp_wmb();
   22.14  }
   22.15 +#endif
   22.16  
   22.17  #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
   22.18  static inline struct page *
   22.19 @@ -73,12 +75,14 @@ alloc_zeroed_user_highpage(struct vm_are
   22.20  }
   22.21  #endif
   22.22  
   22.23 +#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE
   22.24  static inline void clear_highpage(struct page *page)
   22.25  {
   22.26  	void *kaddr = kmap_atomic(page, KM_USER0);
   22.27  	clear_page(kaddr);
   22.28  	kunmap_atomic(kaddr, KM_USER0);
   22.29  }
   22.30 +#endif
   22.31  
   22.32  /*
   22.33   * Same but also flushes aliased cache contents to RAM.
   22.34 @@ -95,6 +99,7 @@ static inline void memclear_highpage_flu
   22.35  	kunmap_atomic(kaddr, KM_USER0);
   22.36  }
   22.37  
   22.38 +#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
   22.39  static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
   22.40  {
   22.41  	char *vfrom, *vto;
   22.42 @@ -107,7 +112,9 @@ static inline void copy_user_highpage(st
   22.43  	/* Make sure this page is cleared on other CPU's too before using it */
   22.44  	smp_wmb();
   22.45  }
   22.46 +#endif
   22.47  
   22.48 +#ifndef __HAVE_ARCH_COPY_HIGHPAGE
   22.49  static inline void copy_highpage(struct page *to, struct page *from)
   22.50  {
   22.51  	char *vfrom, *vto;
   22.52 @@ -118,5 +125,6 @@ static inline void copy_highpage(struct 
   22.53  	kunmap_atomic(vfrom, KM_USER0);
   22.54  	kunmap_atomic(vto, KM_USER1);
   22.55  }
   22.56 +#endif
   22.57  
   22.58  #endif /* _LINUX_HIGHMEM_H */
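
The two highmem headers above use the usual override pattern: the arch header declares out-of-line clear_highpage()/copy_highpage() and defines __HAVE_ARCH_* markers, and the generic header compiles its inline fallbacks only when those markers are absent. A small self-contained sketch of the pattern, with made-up names:

    #include <stdio.h>

    /* --- stand-in for the arch header (asm/highmem.h) --- */
    #define HAVE_ARCH_CLEAR_THING
    static void clear_thing(void)
    {
            printf("arch-specific clear\n");      /* e.g. the Xen hypercall path */
    }

    /* --- stand-in for the generic header (linux/highmem.h) --- */
    #ifndef HAVE_ARCH_CLEAR_THING
    static void clear_thing(void)
    {
            printf("generic fallback clear\n");   /* plain kmap + clear_page path */
    }
    #endif

    int main(void)
    {
            clear_thing();                        /* picks the arch version here */
            return 0;
    }
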
    23.1 --- a/include/xen/gnttab.h	Fri Oct 24 11:22:02 2008 +0900
    23.2 +++ b/include/xen/gnttab.h	Tue Nov 04 12:43:37 2008 +0900
    23.3 @@ -48,6 +48,7 @@ struct gnttab_free_callback {
    23.4  	void (*fn)(void *);
    23.5  	void *arg;
    23.6  	u16 count;
    23.7 +	u8 queued;
    23.8  };
    23.9  
   23.10  int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
    24.1 --- a/include/xen/interface/arch-ia64/hvm/save.h	Fri Oct 24 11:22:02 2008 +0900
    24.2 +++ b/include/xen/interface/arch-ia64/hvm/save.h	Tue Nov 04 12:43:37 2008 +0900
    24.3 @@ -106,7 +106,11 @@ DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct h
    24.4   */
    24.5  #define VIOSAPIC_NUM_PINS     48
    24.6  
    24.7 -union viosapic_rte
     24.8 +/* To share the VT-d code, which uses vioapic_redir_entry.
     24.9 + * Although on ia64 this is for the vsapic, we have to use
    24.10 + * vioapic_redir_entry instead of viosapic_redir_entry.
    24.11 + */
   24.12 +union vioapic_redir_entry
   24.13  {
   24.14      uint64_t bits;
   24.15      struct {
   24.16 @@ -124,7 +128,7 @@ union viosapic_rte
   24.17  
   24.18          uint8_t reserved[3];
   24.19          uint16_t dest_id;
   24.20 -    }; 
   24.21 +    } fields;
   24.22  };
   24.23  
   24.24  struct hvm_hw_ia64_viosapic {
   24.25 @@ -134,7 +138,7 @@ struct hvm_hw_ia64_viosapic {
   24.26      uint32_t    pad;
   24.27      uint64_t    lowest_vcpu_id;
   24.28      uint64_t    base_address;
   24.29 -    union viosapic_rte  redirtbl[VIOSAPIC_NUM_PINS];
   24.30 +    union vioapic_redir_entry  redirtbl[VIOSAPIC_NUM_PINS];
   24.31  };
   24.32  DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic);
   24.33    
    25.1 --- a/include/xen/interface/arch-x86/hvm/save.h	Fri Oct 24 11:22:02 2008 +0900
    25.2 +++ b/include/xen/interface/arch-x86/hvm/save.h	Tue Nov 04 12:43:37 2008 +0900
    25.3 @@ -421,9 +421,20 @@ struct hvm_hw_mtrr {
    25.4  
    25.5  DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
    25.6  
    25.7 +/*
    25.8 + * Viridian hypervisor context.
    25.9 + */
   25.10 +
   25.11 +struct hvm_viridian_context {
   25.12 +    uint64_t hypercall_gpa;
   25.13 +    uint64_t guest_os_id;
   25.14 +};
   25.15 +
   25.16 +DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
   25.17 +
   25.18  /* 
   25.19   * Largest type-code in use
   25.20   */
   25.21 -#define HVM_SAVE_CODE_MAX 14
   25.22 +#define HVM_SAVE_CODE_MAX 15
   25.23  
   25.24  #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
    26.1 --- a/include/xen/interface/domctl.h	Fri Oct 24 11:22:02 2008 +0900
    26.2 +++ b/include/xen/interface/domctl.h	Tue Nov 04 12:43:37 2008 +0900
    26.3 @@ -614,6 +614,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_subsc
    26.4  #define XEN_DOMCTL_set_machine_address_size  51
    26.5  #define XEN_DOMCTL_get_machine_address_size  52
    26.6  
    26.7 +/*
    26.8 + * Do not inject spurious page faults into this domain.
    26.9 + */
   26.10 +#define XEN_DOMCTL_suppress_spurious_page_faults 53
   26.11  
   26.12  struct xen_domctl {
   26.13      uint32_t cmd;
    27.1 --- a/include/xen/interface/features.h	Fri Oct 24 11:22:02 2008 +0900
    27.2 +++ b/include/xen/interface/features.h	Tue Nov 04 12:43:37 2008 +0900
    27.3 @@ -59,6 +59,9 @@
    27.4  /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
    27.5  #define XENFEAT_mmu_pt_update_preserve_ad  5
    27.6  
    27.7 +/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
    27.8 +#define XENFEAT_highmem_assist             6
    27.9 +
   27.10  #define XENFEAT_NR_SUBMAPS 1
   27.11  
   27.12  #endif /* __XEN_PUBLIC_FEATURES_H__ */
    28.1 --- a/include/xen/interface/hvm/params.h	Fri Oct 24 11:22:02 2008 +0900
    28.2 +++ b/include/xen/interface/hvm/params.h	Tue Nov 04 12:43:37 2008 +0900
    28.3 @@ -51,9 +51,16 @@
    28.4  #define HVM_PARAM_BUFIOREQ_PFN 6
    28.5  
    28.6  #ifdef __ia64__
    28.7 +
    28.8  #define HVM_PARAM_NVRAM_FD     7
    28.9  #define HVM_PARAM_VHPT_SIZE    8
   28.10  #define HVM_PARAM_BUFPIOREQ_PFN	9
   28.11 +
   28.12 +#elif defined(__i386__) || defined(__x86_64__)
   28.13 +
   28.14 +/* Expose Viridian interfaces to this HVM guest? */
   28.15 +#define HVM_PARAM_VIRIDIAN     9
   28.16 +
   28.17  #endif
   28.18  
   28.19  /*
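
A sketch of how the new x86-only parameter would typically be set. HVMOP_set_param, struct xen_hvm_param and HYPERVISOR_hvm_op come from hvm/hvm_op.h and the hypercall headers, which are not part of this changeset, so treat the details as assumptions; the helper name is hypothetical:

    #if defined(__i386__) || defined(__x86_64__)
    static int enable_viridian(domid_t domid)
    {
            struct xen_hvm_param p = {
                    .domid = domid,
                    .index = HVM_PARAM_VIRIDIAN,    /* new parameter index 9 */
                    .value = 1,                     /* expose Viridian interfaces */
            };

            return HYPERVISOR_hvm_op(HVMOP_set_param, &p);
    }
    #endif
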
    29.1 --- a/include/xen/interface/kexec.h	Fri Oct 24 11:22:02 2008 +0900
    29.2 +++ b/include/xen/interface/kexec.h	Tue Nov 04 12:43:37 2008 +0900
    29.3 @@ -175,8 +175,6 @@ void vmcoreinfo_append_str(const char *f
    29.4  #define VMCOREINFO_OFFSET_ALIAS(name, field, alias) \
    29.5         vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #alias, \
    29.6                               (unsigned long)offsetof(struct name, field))
    29.7 -extern size_t vmcoreinfo_size_xen;
    29.8 -extern unsigned long paddr_vmcoreinfo_xen;
    29.9  
   29.10  #endif /* _XEN_PUBLIC_KEXEC_H */
   29.11  
    30.1 --- a/include/xen/interface/trace.h	Fri Oct 24 11:22:02 2008 +0900
    30.2 +++ b/include/xen/interface/trace.h	Tue Nov 04 12:43:37 2008 +0900
    30.3 @@ -38,6 +38,7 @@
    30.4  #define TRC_MEM      0x0010f000    /* Xen memory trace         */
    30.5  #define TRC_PV       0x0020f000    /* Xen PV traces            */
    30.6  #define TRC_SHADOW   0x0040f000    /* Xen shadow tracing       */
    30.7 +#define TRC_PM       0x0080f000    /* Xen power management trace */
    30.8  #define TRC_ALL      0x0ffff000
    30.9  #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
   30.10  #define TRC_HD_CYCLE_FLAG (1UL<<31)
   30.11 @@ -146,6 +147,15 @@
   30.12  #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
   30.13  #define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
   30.14  
   30.15 +/* trace subclasses for power management */
   30.16 +#define TRC_PM_FREQ     0x00801000      /* xen cpu freq events */
   30.17 +#define TRC_PM_IDLE     0x00802000      /* xen cpu idle events */
   30.18 +
    30.19 +/* trace events per class */
   30.20 +#define TRC_PM_FREQ_CHANGE      (TRC_PM_FREQ + 0x01)
   30.21 +#define TRC_PM_IDLE_ENTRY       (TRC_PM_IDLE + 0x01)
   30.22 +#define TRC_PM_IDLE_EXIT        (TRC_PM_IDLE + 0x02)
   30.23 +
   30.24  /* This structure represents a single trace buffer record. */
   30.25  struct t_rec {
   30.26      uint32_t event:28;
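
Spelled out, the new power-management trace codes fit the existing scheme (a worked expansion of the definitions above):

    /*   TRC_PM_FREQ_CHANGE = 0x00801000 + 0x01 = 0x00801001
     *   TRC_PM_IDLE_ENTRY  = 0x00802000 + 0x01 = 0x00802001
     *   TRC_PM_IDLE_EXIT   = 0x00802000 + 0x02 = 0x00802002
     * all within the 28-bit t_rec.event field, and unchanged by
     * TRC_HD_TO_EVENT(x) = (x) & 0x0fffffff. */
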
    31.1 --- a/include/xen/interface/xen.h	Fri Oct 24 11:22:02 2008 +0900
    31.2 +++ b/include/xen/interface/xen.h	Tue Nov 04 12:43:37 2008 +0900
    31.3 @@ -231,6 +231,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
    31.4   * cmd: MMUEXT_SET_LDT
    31.5   * linear_addr: Linear address of LDT base (NB. must be page-aligned).
    31.6   * nr_ents: Number of entries in LDT.
    31.7 + *
    31.8 + * cmd: MMUEXT_CLEAR_PAGE
    31.9 + * mfn: Machine frame number to be cleared.
   31.10 + *
   31.11 + * cmd: MMUEXT_COPY_PAGE
   31.12 + * mfn: Machine frame number of the destination page.
   31.13 + * src_mfn: Machine frame number of the source page.
   31.14   */
   31.15  #define MMUEXT_PIN_L1_TABLE      0
   31.16  #define MMUEXT_PIN_L2_TABLE      1
   31.17 @@ -247,12 +254,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
   31.18  #define MMUEXT_FLUSH_CACHE      12
   31.19  #define MMUEXT_SET_LDT          13
   31.20  #define MMUEXT_NEW_USER_BASEPTR 15
   31.21 +#define MMUEXT_CLEAR_PAGE       16
   31.22 +#define MMUEXT_COPY_PAGE        17
   31.23  
   31.24  #ifndef __ASSEMBLY__
   31.25  struct mmuext_op {
   31.26      unsigned int cmd;
   31.27      union {
   31.28 -        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
   31.29 +        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
   31.30 +         * CLEAR_PAGE, COPY_PAGE */
   31.31          xen_pfn_t     mfn;
   31.32          /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
   31.33          unsigned long linear_addr;
   31.34 @@ -266,6 +276,8 @@ struct mmuext_op {
   31.35  #else
   31.36          void *vcpumask;
   31.37  #endif
   31.38 +        /* COPY_PAGE */
   31.39 +        xen_pfn_t src_mfn;
   31.40      } arg2;
   31.41  };
   31.42  typedef struct mmuext_op mmuext_op_t;
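
Putting the new interface pieces together: XENFEAT_highmem_assist (features.h), MMUEXT_COPY_PAGE and the arg2.src_mfn union member above are used exactly as in the arch/i386/mm/highmem-xen.c hunk earlier in this changeset. A condensed sketch; the helper name xen_copy_page_mfn is hypothetical:

    static int xen_copy_page_mfn(unsigned long to_pfn, unsigned long from_pfn)
    {
            struct mmuext_op meo;

            if (!xen_feature(XENFEAT_highmem_assist))
                    return -1;                        /* caller falls back to kmap */

            meo.cmd          = MMUEXT_COPY_PAGE;
            meo.arg1.mfn     = pfn_to_mfn(to_pfn);    /* destination frame */
            meo.arg2.src_mfn = pfn_to_mfn(from_pfn);  /* new union member  */

            return HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF);
    }
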