ia64/xen-unstable

changeset 6503:1fc6473ecc01

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Aug 30 20:03:51 2005 +0000 (2005-08-30)
parents 9225c3f597db 551870a55f24
children 38312fe7ec38
files linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c tools/console/daemon/io.c tools/console/daemon/io.h tools/console/daemon/main.c tools/console/daemon/utils.c tools/misc/cpuperf/cpuperf.c tools/python/xen/xend/XendDomainInfo.py xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/traps.c xen/common/domain.c xen/common/grant_table.c xen/include/asm-ia64/mm.h xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/vmx_platform.h xen/include/xen/perfc.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Tue Aug 30 20:02:59 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Tue Aug 30 20:03:51 2005 +0000
     1.3 @@ -44,13 +44,6 @@
     1.4  #include <asm-xen/hypervisor.h>
     1.5  #include <asm-xen/evtchn.h>
     1.6  
     1.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     1.8 -EXPORT_SYMBOL(force_evtchn_callback);
     1.9 -EXPORT_SYMBOL(evtchn_do_upcall);
    1.10 -EXPORT_SYMBOL(bind_evtchn_to_irq);
    1.11 -EXPORT_SYMBOL(unbind_evtchn_from_irq);
    1.12 -#endif
    1.13 -
    1.14  /*
    1.15   * This lock protects updates to the following mapping and reference-count
    1.16   * arrays. The lock does not need to be acquired to read the mapping tables.
    1.17 @@ -133,6 +126,7 @@ void force_evtchn_callback(void)
    1.18  {
    1.19      (void)HYPERVISOR_xen_version(0);
    1.20  }
    1.21 +EXPORT_SYMBOL(force_evtchn_callback);
    1.22  
    1.23  /* NB. Interrupts are disabled on entry. */
    1.24  asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
    1.25 @@ -165,6 +159,7 @@ asmlinkage void evtchn_do_upcall(struct 
    1.26          }
    1.27      }
    1.28  }
    1.29 +EXPORT_SYMBOL(evtchn_do_upcall);
    1.30  
    1.31  static int find_unbound_irq(void)
    1.32  {
    1.33 @@ -211,6 +206,7 @@ int bind_virq_to_irq(int virq)
    1.34      
    1.35      return irq;
    1.36  }
    1.37 +EXPORT_SYMBOL(bind_virq_to_irq);
    1.38  
    1.39  void unbind_virq_from_irq(int virq)
    1.40  {
    1.41 @@ -244,6 +240,7 @@ void unbind_virq_from_irq(int virq)
    1.42  
    1.43      spin_unlock(&irq_mapping_update_lock);
    1.44  }
    1.45 +EXPORT_SYMBOL(unbind_virq_from_irq);
    1.46  
    1.47  int bind_ipi_to_irq(int ipi)
    1.48  {
    1.49 @@ -279,6 +276,7 @@ int bind_ipi_to_irq(int ipi)
    1.50  
    1.51      return irq;
    1.52  }
    1.53 +EXPORT_SYMBOL(bind_ipi_to_irq);
    1.54  
    1.55  void unbind_ipi_from_irq(int ipi)
    1.56  {
    1.57 @@ -306,6 +304,7 @@ void unbind_ipi_from_irq(int ipi)
    1.58  
    1.59      spin_unlock(&irq_mapping_update_lock);
    1.60  }
    1.61 +EXPORT_SYMBOL(unbind_ipi_from_irq);
    1.62  
    1.63  int bind_evtchn_to_irq(unsigned int evtchn)
    1.64  {
    1.65 @@ -326,6 +325,7 @@ int bind_evtchn_to_irq(unsigned int evtc
    1.66      
    1.67      return irq;
    1.68  }
    1.69 +EXPORT_SYMBOL(bind_evtchn_to_irq);
    1.70  
    1.71  void unbind_evtchn_from_irq(unsigned int evtchn)
    1.72  {
    1.73 @@ -341,6 +341,7 @@ void unbind_evtchn_from_irq(unsigned int
    1.74  
    1.75      spin_unlock(&irq_mapping_update_lock);
    1.76  }
    1.77 +EXPORT_SYMBOL(unbind_evtchn_from_irq);
    1.78  
    1.79  int bind_evtchn_to_irqhandler(
    1.80      unsigned int evtchn,
    1.81 @@ -359,6 +360,7 @@ int bind_evtchn_to_irqhandler(
    1.82  
    1.83      return retval;
    1.84  }
    1.85 +EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
    1.86  
    1.87  void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
    1.88  {
    1.89 @@ -366,6 +368,7 @@ void unbind_evtchn_from_irqhandler(unsig
    1.90      free_irq(irq, dev_id);
    1.91      unbind_evtchn_from_irq(evtchn);
    1.92  }
    1.93 +EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
    1.94  
    1.95  #ifdef CONFIG_SMP
    1.96  static void do_nothing_function(void *ign)
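
Note: the evtchn.c hunks drop the version-gated block of EXPORT_SYMBOL declarations and instead export each symbol immediately after its definition, which also picks up the virq/ipi bind/unbind functions and the irqhandler helpers the old block never exported. A minimal sketch of the convention being adopted (stub body, not part of the changeset):

    #include <linux/module.h>

    int bind_virq_to_irq(int virq)
    {
            /* ...body elided in this sketch... */
            return -1;
    }
    EXPORT_SYMBOL(bind_virq_to_irq);
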
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Tue Aug 30 20:02:59 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Tue Aug 30 20:03:51 2005 +0000
     2.3 @@ -149,7 +149,7 @@ void dump_pagetable(unsigned long addres
     2.4  	pmd_t *pmd;
     2.5  	pte_t *pte;
     2.6  
     2.7 -        pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
     2.8 +	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
     2.9  	pgd += pgd_index(address);
    2.10  
    2.11  	printk("PGD %lx ", pgd_val(*pgd));
    2.12 @@ -296,9 +296,9 @@ int exception_trace = 1;
    2.13  #define MEM_VERBOSE 1
    2.14  
    2.15  #ifdef MEM_VERBOSE
    2.16 -#define MEM_LOG(_f, _a...)                           \
    2.17 -  printk("fault.c:[%d]-> " _f "\n", \
    2.18 -          __LINE__ , ## _a )
    2.19 +#define MEM_LOG(_f, _a...)			\
    2.20 +	printk("fault.c:[%d]-> " _f "\n",	\
    2.21 +	__LINE__ , ## _a )
    2.22  #else
    2.23  #define MEM_LOG(_f, _a...) ((void)0)
    2.24  #endif
    2.25 @@ -325,7 +325,7 @@ asmlinkage void do_page_fault(struct pt_
    2.26  	siginfo_t info;
    2.27  
    2.28  	if (!user_mode(regs))
    2.29 -                error_code &= ~4; /* means kernel */
    2.30 +		error_code &= ~4; /* means kernel */
    2.31  
    2.32  #ifdef CONFIG_CHECKING
    2.33  	{ 
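
Note: these fault.c hunks are whitespace-only (spaces to tabs), but the MEM_LOG macro they reindent is worth a word: it uses GCC's ", ## args" extension, where the paste operator swallows the trailing comma when the variadic list is empty. A stand-alone illustration, using printf in place of printk (assumes GCC's named variadic macros):

    #include <stdio.h>

    #define MEM_LOG(_f, _a...) \
            printf("fault.c:[%d]-> " _f "\n", __LINE__ , ## _a)

    int main(void)
    {
            MEM_LOG("plain message");        /* empty variadic list: comma dropped */
            MEM_LOG("pte %lx", 0xdeadUL);    /* with arguments */
            return 0;
    }
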
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Tue Aug 30 20:02:59 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Tue Aug 30 20:03:51 2005 +0000
     3.3 @@ -62,14 +62,16 @@ static int init_mapping_done;
     3.4   * avaialble in init_memory_mapping().
     3.5   */
     3.6  
     3.7 -#define addr_to_page(addr, page)                                             \
     3.8 -        (addr) &= PHYSICAL_PAGE_MASK;                                   \
     3.9 -        (page) = ((unsigned long *) ((unsigned long)(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map)))
    3.10 +#define addr_to_page(addr, page)				\
    3.11 +	(addr) &= PHYSICAL_PAGE_MASK;				\
    3.12 +	(page) = ((unsigned long *) ((unsigned long)		\
    3.13 +	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
    3.14 +	__START_KERNEL_map)))
    3.15  
    3.16  static void __make_page_readonly(unsigned long va)
    3.17  {
    3.18 -        unsigned long addr;
    3.19 -        pte_t pte, *ptep;
    3.20 +	unsigned long addr;
    3.21 +	pte_t pte, *ptep;
    3.22  	unsigned long *page = (unsigned long *) init_level4_pgt;
    3.23  
    3.24  	addr = (unsigned long) page[pgd_index(va)];
    3.25 @@ -89,22 +91,22 @@ static void __make_page_readonly(unsigne
    3.26  
    3.27  static void __make_page_writable(unsigned long va)
    3.28  {
    3.29 -        unsigned long addr;
    3.30 -        pte_t pte, *ptep;
    3.31 -        unsigned long *page = (unsigned long *) init_level4_pgt;
    3.32 +	unsigned long addr;
    3.33 +	pte_t pte, *ptep;
    3.34 +	unsigned long *page = (unsigned long *) init_level4_pgt;
    3.35  
    3.36 -        addr = (unsigned long) page[pgd_index(va)];
    3.37 -        addr_to_page(addr, page);
    3.38 +	addr = (unsigned long) page[pgd_index(va)];
    3.39 +	addr_to_page(addr, page);
    3.40  
    3.41 -        addr = page[pud_index(va)];
    3.42 -        addr_to_page(addr, page);
    3.43 -        
    3.44 -        addr = page[pmd_index(va)];
    3.45 -        addr_to_page(addr, page);
    3.46 +	addr = page[pud_index(va)];
    3.47 +	addr_to_page(addr, page);
    3.48 + 
    3.49 +	addr = page[pmd_index(va)];
    3.50 +	addr_to_page(addr, page);
    3.51  
    3.52 -        ptep = (pte_t *) &page[pte_index(va)];
    3.53 +	ptep = (pte_t *) &page[pte_index(va)];
    3.54  	pte.pte = (ptep->pte | _PAGE_RW);
    3.55 -        xen_l1_entry_update(ptep, pte);
    3.56 +	xen_l1_entry_update(ptep, pte);
    3.57  	__flush_tlb_one(addr);
    3.58  }
    3.59  
    3.60 @@ -115,55 +117,55 @@ static void __make_page_writable(unsigne
    3.61  void make_page_readonly(void *va)
    3.62  {
    3.63  	pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
    3.64 -        unsigned long addr = (unsigned long) va;
    3.65 +	unsigned long addr = (unsigned long) va;
    3.66  
    3.67 -        if (!init_mapping_done) {
    3.68 -                __make_page_readonly(addr);
    3.69 -                return;
    3.70 -        }
    3.71 -                
    3.72 -        pgd = pgd_offset_k(addr);
    3.73 -        pud = pud_offset(pgd, addr);
    3.74 -        pmd = pmd_offset(pud, addr);
    3.75 -        ptep = pte_offset_kernel(pmd, addr);
    3.76 +	if (!init_mapping_done) {
    3.77 +		__make_page_readonly(addr);
    3.78 +		return;
    3.79 +	}
    3.80 +  
    3.81 +	pgd = pgd_offset_k(addr);
    3.82 +	pud = pud_offset(pgd, addr);
    3.83 +	pmd = pmd_offset(pud, addr);
    3.84 +	ptep = pte_offset_kernel(pmd, addr);
    3.85  	pte.pte = (ptep->pte & ~_PAGE_RW);
    3.86 -        xen_l1_entry_update(ptep, pte);
    3.87 +	xen_l1_entry_update(ptep, pte);
    3.88  	__flush_tlb_one(addr);
    3.89  }
    3.90  
    3.91  void make_page_writable(void *va)
    3.92  {
    3.93 -        pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
    3.94 -        unsigned long addr = (unsigned long) va;
    3.95 +	pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
    3.96 +	unsigned long addr = (unsigned long) va;
    3.97  
    3.98 -        if (!init_mapping_done) {
    3.99 -                __make_page_writable(addr);
   3.100 -                return;
   3.101 -        }
   3.102 +	if (!init_mapping_done) {
   3.103 +		__make_page_writable(addr);
   3.104 +		return;
   3.105 +	}
   3.106  
   3.107 -        pgd = pgd_offset_k(addr);
   3.108 -        pud = pud_offset(pgd, addr);
   3.109 -        pmd = pmd_offset(pud, addr);
   3.110 -        ptep = pte_offset_kernel(pmd, addr);
   3.111 +	pgd = pgd_offset_k(addr);
   3.112 +	pud = pud_offset(pgd, addr);
   3.113 +	pmd = pmd_offset(pud, addr);
   3.114 +	ptep = pte_offset_kernel(pmd, addr);
   3.115  	pte.pte = (ptep->pte | _PAGE_RW);
   3.116 -        xen_l1_entry_update(ptep, pte);
   3.117 +	xen_l1_entry_update(ptep, pte);
   3.118  	__flush_tlb_one(addr);
   3.119  }
   3.120  
   3.121  void make_pages_readonly(void* va, unsigned nr)
   3.122  {
   3.123 -        while ( nr-- != 0 ) {
   3.124 -                make_page_readonly(va);
   3.125 -                va = (void*)((unsigned long)va + PAGE_SIZE);
   3.126 -        }
   3.127 +	while (nr-- != 0) {
   3.128 +		make_page_readonly(va);
   3.129 +		va = (void*)((unsigned long)va + PAGE_SIZE);
   3.130 +	}
   3.131  }
   3.132  
   3.133  void make_pages_writable(void* va, unsigned nr)
   3.134  {
   3.135 -        while ( nr-- != 0 ) {
   3.136 -                make_page_writable(va);
   3.137 -                va = (void*)((unsigned long)va + PAGE_SIZE);
   3.138 -        }
   3.139 +	while (nr-- != 0) {
   3.140 +		make_page_writable(va);
   3.141 +		va = (void*)((unsigned long)va + PAGE_SIZE);
   3.142 +	}
   3.143  }
   3.144  
   3.145  /*
   3.146 @@ -389,7 +391,7 @@ void __set_fixmap_user (enum fixed_addre
   3.147          set_pte_phys(address, phys, prot, SET_FIXMAP_USER); 
   3.148  }
   3.149  
   3.150 -unsigned long __initdata table_start, table_end, tables_space; 
   3.151 +unsigned long __initdata table_start, tables_space; 
   3.152  
   3.153  unsigned long get_machine_pfn(unsigned long addr)
   3.154  {
   3.155 @@ -400,38 +402,13 @@ unsigned long get_machine_pfn(unsigned l
   3.156          return pte_mfn(*pte);
   3.157  } 
   3.158  
   3.159 -#define ALIGN_TO_4K __attribute__((section(".data.page_aligned")))
   3.160 -#define MAX_LOW_PAGES	0x20
   3.161 -static unsigned long __init_pgt[MAX_LOW_PAGES][512]  ALIGN_TO_4K;
   3.162 -static int __init_pgt_index;
   3.163 -
   3.164 -/*
   3.165 - * We start using from start_pfn
   3.166 - */
   3.167  static __init void *alloc_static_page(unsigned long *phys)
   3.168  {
   3.169 -	int i = __init_pgt_index++;
   3.170 -
   3.171 -	if (__init_pgt_index >= MAX_LOW_PAGES) {
   3.172 -		printk("Need to increase MAX_LOW_PAGES");
   3.173 -		BUG();
   3.174 -	}
   3.175 -		
   3.176 -	*phys = __pa(__init_pgt[i]);
   3.177 -
   3.178 -	return (void *) __init_pgt[i];
   3.179 -} 
   3.180 -
   3.181 -/*
   3.182 - * Get RO page
   3.183 - */
   3.184 -static void __init *alloc_low_page(unsigned long *phys)
   3.185 -{ 
   3.186 -        unsigned long pfn = table_end++;
   3.187 -    
   3.188 -        *phys = (pfn << PAGE_SHIFT);
   3.189 -        memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
   3.190 -        return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
   3.191 +	unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
   3.192 +	*phys = start_pfn << PAGE_SHIFT;
   3.193 +	start_pfn++;
   3.194 +	memset((void *)va, 0, PAGE_SIZE);
   3.195 +	return (void *)va;
   3.196  } 
   3.197  
   3.198  #define PTE_SIZE PAGE_SIZE
   3.199 @@ -443,27 +420,21 @@ static inline void __set_pte(pte_t *dst,
   3.200  
   3.201  static inline int make_readonly(unsigned long paddr)
   3.202  {
   3.203 -    int readonly = 0;
   3.204 -
   3.205 -    /* Make new page tables read-only. */
   3.206 -    if ((paddr < ((table_start << PAGE_SHIFT) + tables_space)) &&
   3.207 -        (paddr >= (table_start << PAGE_SHIFT)))
   3.208 -        readonly = 1;
   3.209 +	int readonly = 0;
   3.210  
   3.211 -    /* Make old page tables read-only. */
   3.212 -    if ((paddr < ((xen_start_info.pt_base - __START_KERNEL_map) +
   3.213 -                  (xen_start_info.nr_pt_frames << PAGE_SHIFT))) &&
   3.214 -        (paddr >= (xen_start_info.pt_base - __START_KERNEL_map)))
   3.215 -        readonly = 1;
   3.216 +	/* Make old and new page tables read-only. */
   3.217 +	if ((paddr >= (xen_start_info.pt_base - __START_KERNEL_map))
   3.218 +	    && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
   3.219 +		readonly = 1;
   3.220 +	/*
   3.221 +	 * No need for writable mapping of kernel image. This also ensures that
   3.222 +	 * page and descriptor tables embedded inside don't have writable
   3.223 +	 * mappings. 
   3.224 +	 */
   3.225 +	if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
   3.226 +		readonly = 1;
   3.227  
   3.228 -    /*
   3.229 -     * No need for writable mapping of kernel image. This also ensures that
   3.230 -     * page and descriptor tables embedded inside don't have writable mappings.
   3.231 -     */
   3.232 -    if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
   3.233 -        readonly = 1;
   3.234 -
   3.235 -    return readonly;
   3.236 +	return readonly;
   3.237  }
   3.238  
   3.239  static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
   3.240 @@ -485,7 +456,7 @@ static void __init phys_pud_init(pud_t *
   3.241  			break;
   3.242  		} 
   3.243  
   3.244 -		pmd = alloc_low_page(&pmd_phys);
   3.245 +		pmd = alloc_static_page(&pmd_phys);
   3.246                  make_page_readonly(pmd);
   3.247                  xen_pmd_pin(pmd_phys);
   3.248  		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
   3.249 @@ -499,7 +470,7 @@ static void __init phys_pud_init(pud_t *
   3.250  					set_pmd(pmd,  __pmd(0)); 
   3.251  				break;
   3.252  			}
   3.253 -                        pte = alloc_low_page(&pte_phys);
   3.254 +                        pte = alloc_static_page(&pte_phys);
   3.255                          pte_save = pte;
   3.256                          for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
   3.257                                  if ((paddr >= end) ||
   3.258 @@ -526,15 +497,16 @@ static void __init phys_pud_init(pud_t *
   3.259  
   3.260  static void __init find_early_table_space(unsigned long end)
   3.261  {
   3.262 -        unsigned long puds, pmds, ptes; 
   3.263 +	unsigned long puds, pmds, ptes; 
   3.264  
   3.265  	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
   3.266  	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
   3.267 -        ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
   3.268 +	ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
   3.269  
   3.270 -        tables_space = round_up(puds * 8, PAGE_SIZE) + 
   3.271 -	    		  round_up(pmds * 8, PAGE_SIZE) + 
   3.272 -	    		  round_up(ptes * 8, PAGE_SIZE); 
   3.273 +	tables_space =
   3.274 +		round_up(puds * 8, PAGE_SIZE) + 
   3.275 +		round_up(pmds * 8, PAGE_SIZE) + 
   3.276 +		round_up(ptes * 8, PAGE_SIZE); 
   3.277  }
   3.278  
   3.279  void __init xen_init_pt(void)
   3.280 @@ -580,66 +552,59 @@ void __init xen_init_pt(void)
   3.281  		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
   3.282  }
   3.283  
   3.284 -/*
   3.285 - * Extend kernel mapping to access pages for page tables.  The initial
   3.286 - * mapping done by Xen is minimal (e.g. 8MB) and we need to extend the
   3.287 - * mapping for early initialization.
   3.288 - */
   3.289 -static unsigned long current_size, extended_size;
   3.290 -
   3.291  void __init extend_init_mapping(void) 
   3.292  {
   3.293  	unsigned long va = __START_KERNEL_map;
   3.294  	unsigned long phys, addr, *pte_page;
   3.295 -        pmd_t *pmd;
   3.296 +	pmd_t *pmd;
   3.297  	pte_t *pte, new_pte;
   3.298 -	unsigned long *page = (unsigned long *) init_level4_pgt;
   3.299 -	int i;
   3.300 +	unsigned long *page = (unsigned long *)init_level4_pgt;
   3.301  
   3.302  	addr = page[pgd_index(va)];
   3.303  	addr_to_page(addr, page);
   3.304  	addr = page[pud_index(va)];
   3.305  	addr_to_page(addr, page);
   3.306  
   3.307 -	for (;;) {
   3.308 -		pmd = (pmd_t *)&page[pmd_index(va)];
   3.309 -		if (!pmd_present(*pmd))
   3.310 -			break;
   3.311 -		addr = page[pmd_index(va)];
   3.312 -		addr_to_page(addr, pte_page);
   3.313 -		for (i = 0; i < PTRS_PER_PTE; i++) {
   3.314 -			pte = (pte_t *) &pte_page[pte_index(va)];
   3.315 -			if (!pte_present(*pte))
   3.316 -				break;
   3.317 -			va += PAGE_SIZE;
   3.318 -			current_size += PAGE_SIZE;
   3.319 -		}
   3.320 +	/* Kill mapping of low 1MB. */
   3.321 +	while (va < (unsigned long)&_text) {
   3.322 +		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
   3.323 +		va += PAGE_SIZE;
   3.324  	}
   3.325  
   3.326 -	while (va < __START_KERNEL_map + current_size + tables_space) {
   3.327 -		pmd = (pmd_t *) &page[pmd_index(va)];
   3.328 -		if (!pmd_none(*pmd))
   3.329 -			continue;
   3.330 -		pte_page = (unsigned long *) alloc_static_page(&phys);
   3.331 -		make_page_readonly(pte_page);
   3.332 -		xen_pte_pin(phys);
   3.333 -		set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
   3.334 -		for (i = 0; i < PTRS_PER_PTE; i++, va += PAGE_SIZE) {
   3.335 +	/* Ensure init mappings cover kernel text/data and initial tables. */
   3.336 +	while (va < (__START_KERNEL_map
   3.337 +		     + (start_pfn << PAGE_SHIFT)
   3.338 +		     + tables_space)) {
   3.339 +		pmd = (pmd_t *)&page[pmd_index(va)];
   3.340 +		if (pmd_none(*pmd)) {
   3.341 +			pte_page = alloc_static_page(&phys);
   3.342 +			make_page_readonly(pte_page);
   3.343 +			xen_pte_pin(phys);
   3.344 +			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
   3.345 +		} else {
   3.346 +			addr = page[pmd_index(va)];
   3.347 +			addr_to_page(addr, pte_page);
   3.348 +		}
   3.349 +		pte = (pte_t *)&pte_page[pte_index(va)];
   3.350 +		if (pte_none(*pte)) {
   3.351  			new_pte = pfn_pte(
   3.352  				(va - __START_KERNEL_map) >> PAGE_SHIFT, 
   3.353  				__pgprot(_KERNPG_TABLE | _PAGE_USER));
   3.354 -			pte = (pte_t *)&pte_page[pte_index(va)];
   3.355  			xen_l1_entry_update(pte, new_pte);
   3.356 -			extended_size += PAGE_SIZE;
   3.357  		}
   3.358 +		va += PAGE_SIZE;
   3.359  	}
   3.360  
   3.361 -	/* Kill mapping of low 1MB. */
   3.362 -	for (va = __START_KERNEL_map; va < (unsigned long)&_text; va += PAGE_SIZE)
   3.363 +	/* Finally, blow away any spurious initial mappings. */
   3.364 +	while (1) {
   3.365 +		pmd = (pmd_t *)&page[pmd_index(va)];
   3.366 +		if (pmd_none(*pmd))
   3.367 +			break;
   3.368  		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
   3.369 +		va += PAGE_SIZE;
   3.370 +	}
   3.371  }
   3.372  
   3.373 -
   3.374  /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   3.375     This runs before bootmem is initialized and gets pages directly from the 
   3.376     physical memory. To access them they are temporarily mapped. */
   3.377 @@ -651,34 +616,31 @@ void __init init_memory_mapping(unsigned
   3.378  
   3.379  	find_early_table_space(end);
   3.380  	extend_init_mapping();
   3.381 -	start_pfn = current_size >> PAGE_SHIFT;
   3.382  
   3.383  	table_start = start_pfn;
   3.384 -	table_end = table_start;
   3.385  
   3.386  	start = (unsigned long)__va(start);
   3.387  	end = (unsigned long)__va(end);
   3.388  
   3.389  	for (; start < end; start = next) {
   3.390  		unsigned long pud_phys; 
   3.391 -                pud_t *pud = alloc_low_page(&pud_phys);
   3.392 -                make_page_readonly(pud);
   3.393 -                xen_pud_pin(pud_phys);
   3.394 +		pud_t *pud = alloc_static_page(&pud_phys);
   3.395 +		make_page_readonly(pud);
   3.396 +		xen_pud_pin(pud_phys);
   3.397  		next = start + PGDIR_SIZE;
   3.398  		if (next > end) 
   3.399  			next = end; 
   3.400  		phys_pud_init(pud, __pa(start), __pa(next));
   3.401  		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
   3.402 -	} 
   3.403 +	}
   3.404  
   3.405 -	printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end, 
   3.406 -	       table_start<<PAGE_SHIFT, 
   3.407 -	       table_end<<PAGE_SHIFT);
   3.408 +	printk("kernel direct mapping tables upto %lx @ %lx-%lx\n",
   3.409 +	       __pa(end), table_start<<PAGE_SHIFT, start_pfn<<PAGE_SHIFT);
   3.410  
   3.411 -        start_pfn = ((current_size + extended_size) >> PAGE_SHIFT);
   3.412 +	BUG_ON(start_pfn != (table_start + (tables_space >> PAGE_SHIFT)));
   3.413  
   3.414  	__flush_tlb_all();
   3.415 -        init_mapping_done = 1;
   3.416 +	init_mapping_done = 1;
   3.417  }
   3.418  
   3.419  extern struct x8664_pda cpu_pda[NR_CPUS];
   3.420 @@ -1003,3 +965,13 @@ int in_gate_area_no_task(unsigned long a
   3.421  {
   3.422  	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
   3.423  }
   3.424 +
   3.425 +/*
   3.426 + * Local variables:
   3.427 + *  c-file-style: "linux"
   3.428 + *  indent-tabs-mode: t
   3.429 + *  c-indent-level: 8
   3.430 + *  c-basic-offset: 8
   3.431 + *  tab-width: 8
   3.432 + * End:
   3.433 + */
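
Note: the init.c hunks carry the real substance of this file's change: the two early allocators (the static __init_pgt pool and the table_end-based alloc_low_page()) are folded into a single bump allocator driven by start_pfn. That is why table_end and the current_size/extended_size bookkeeping disappear, and why init_memory_mapping() can now BUG_ON() that exactly tables_space bytes were consumed. Restating the merged allocator outside the diff:

    /* start_pfn is the global first-free-page cursor; pages below it are
     * already mapped at __START_KERNEL_map by extend_init_mapping(). */
    static __init void *alloc_static_page(unsigned long *phys)
    {
            unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;

            *phys = start_pfn << PAGE_SHIFT;   /* physical address handed back */
            start_pfn++;                       /* bump the cursor */
            memset((void *)va, 0, PAGE_SIZE);  /* callers rely on zeroed pages */
            return (void *)va;
    }
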
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Tue Aug 30 20:02:59 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Tue Aug 30 20:03:51 2005 +0000
     4.3 @@ -45,7 +45,9 @@
     4.4  
     4.5  static char printf_buffer[4096];
     4.6  static LIST_HEAD(watches);
     4.7 +
     4.8  DECLARE_MUTEX(xenbus_lock);
     4.9 +EXPORT_SYMBOL(xenbus_lock);
    4.10  
    4.11  static int get_error(const char *errorstring)
    4.12  {
    4.13 @@ -224,6 +226,7 @@ char **xenbus_directory(const char *dir,
    4.14  		ret[(*num)++] = p;
    4.15  	return ret;
    4.16  }
    4.17 +EXPORT_SYMBOL(xenbus_directory);
    4.18  
    4.19  /* Check if a path exists. Return 1 if it does. */
    4.20  int xenbus_exists(const char *dir, const char *node)
    4.21 @@ -237,6 +240,7 @@ int xenbus_exists(const char *dir, const
    4.22  	kfree(d);
    4.23  	return 1;
    4.24  }
    4.25 +EXPORT_SYMBOL(xenbus_exists);
    4.26  
    4.27  /* Get the value of a single file.
    4.28   * Returns a kmalloced value: call free() on it after use.
    4.29 @@ -277,18 +281,21 @@ int xenbus_write(const char *dir, const 
    4.30  
    4.31  	return xs_error(xs_talkv(XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
    4.32  }
    4.33 +EXPORT_SYMBOL(xenbus_write);
    4.34  
    4.35  /* Create a new directory. */
    4.36  int xenbus_mkdir(const char *dir, const char *node)
    4.37  {
    4.38  	return xs_error(xs_single(XS_MKDIR, join(dir, node), NULL));
    4.39  }
    4.40 +EXPORT_SYMBOL(xenbus_mkdir);
    4.41  
    4.42  /* Destroy a file or directory (directories must be empty). */
    4.43  int xenbus_rm(const char *dir, const char *node)
    4.44  {
    4.45  	return xs_error(xs_single(XS_RM, join(dir, node), NULL));
    4.46  }
    4.47 +EXPORT_SYMBOL(xenbus_rm);
    4.48  
    4.49  /* Start a transaction: changes by others will not be seen during this
    4.50   * transaction, and changes will not be visible to others until end.
     9.1 --- a/tools/misc/cpuperf/cpuperf.c	Tue Aug 30 20:02:59 2005 +0000
     9.2 +++ b/tools/misc/cpuperf/cpuperf.c	Tue Aug 30 20:03:51 2005 +0000
     9.3 @@ -243,16 +243,12 @@ int main(int argc, char **argv)
     9.4      }
     9.5  
     9.6      if (read) {
     9.7 -        while((cpu_mask&1)) {
     9.8 -            int i;
     9.9 -            for (i=0x300;i<0x312;i++) {
    9.10 -                printf("%010llu ",cpus_rdmsr( cpu_mask, i ) );
    9.11 -            }
    9.12 -            printf("\n");
    9.13 -            cpu_mask>>=1;
    9.14 -        }
    9.15 +        int i;
    9.16 +        for (i=0x300;i<0x312;i++)
    9.17 +            printf("%010llu ",cpus_rdmsr( cpu_mask, i ) );
    9.18 +        printf("\n");
    9.19          exit(1);
    9.20 -    } 
    9.21 +    }
    9.22      
    9.23      if (!escr) {
    9.24          fprintf(stderr, "Need an ESCR.\n");
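
Note: the cpuperf.c change collapses the per-mask-bit loop in the read path to a single row: cpus_rdmsr() already takes the CPU mask as an argument, so one pass over the Pentium 4 counter MSRs (0x300 through 0x311, i.e. 18 counters) is printed. Restated outside the diff:

    if (read) {
        int i;
        for (i = 0x300; i < 0x312; i++)
            printf("%010llu ", cpus_rdmsr(cpu_mask, i));
        printf("\n");
        exit(1);
    }
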
    11.1 --- a/xen/arch/x86/domain.c	Tue Aug 30 20:02:59 2005 +0000
    11.2 +++ b/xen/arch/x86/domain.c	Tue Aug 30 20:03:51 2005 +0000
    11.3 @@ -255,13 +255,13 @@ void arch_do_createdomain(struct vcpu *v
    11.4      v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    11.5      v->cpumap = CPUMAP_RUNANYWHERE;
    11.6      SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    11.7 -    machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    11.8 -                           PAGE_SHIFT] = INVALID_M2P_ENTRY;
    11.9 +    set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
   11.10 +            INVALID_M2P_ENTRY);
   11.11      
   11.12      d->arch.mm_perdomain_pt = alloc_xenheap_page();
   11.13      memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
   11.14 -    machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
   11.15 -                           PAGE_SHIFT] = INVALID_M2P_ENTRY;
   11.16 +    set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
   11.17 +            INVALID_M2P_ENTRY);
   11.18      v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
   11.19      v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
   11.20          l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    12.1 --- a/xen/arch/x86/domain_build.c	Tue Aug 30 20:02:59 2005 +0000
    12.2 +++ b/xen/arch/x86/domain_build.c	Tue Aug 30 20:03:51 2005 +0000
    12.3 @@ -592,8 +592,7 @@ int construct_dom0(struct domain *d,
    12.4      if ( opt_dom0_translate )
    12.5      {
    12.6          si->shared_info  = d->next_io_page << PAGE_SHIFT;
    12.7 -        set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
    12.8 -                          d->next_io_page);
    12.9 +        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, d->next_io_page);
   12.10          d->next_io_page++;
   12.11      }
   12.12      else
   12.13 @@ -614,7 +613,7 @@ int construct_dom0(struct domain *d,
   12.14              mfn = alloc_epfn - (pfn - REVERSE_START);
   12.15  #endif
   12.16          ((u32 *)vphysmap_start)[pfn] = mfn;
   12.17 -        machine_to_phys_mapping[mfn] = pfn;
   12.18 +        set_pfn_from_mfn(mfn, pfn);
   12.19      }
   12.20      while ( pfn < nr_pages )
   12.21      {
   12.22 @@ -627,7 +626,7 @@ int construct_dom0(struct domain *d,
   12.23  #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
   12.24  #endif
   12.25              ((u32 *)vphysmap_start)[pfn] = mfn;
   12.26 -            machine_to_phys_mapping[mfn] = pfn;
   12.27 +            set_pfn_from_mfn(mfn, pfn);
   12.28  #undef pfn
   12.29              page++; pfn++;
   12.30          }
    13.1 --- a/xen/arch/x86/mm.c	Tue Aug 30 20:02:59 2005 +0000
    13.2 +++ b/xen/arch/x86/mm.c	Tue Aug 30 20:03:51 2005 +0000
    13.3 @@ -1452,7 +1452,7 @@ int get_page_type(struct pfn_info *page,
    13.4                                  "!= exp %" PRtype_info ") "
    13.5                                  "for mfn %lx (pfn %x)",
    13.6                                  x, type, page_to_pfn(page),
    13.7 -                                machine_to_phys_mapping[page_to_pfn(page)]);
    13.8 +                                get_pfn_from_mfn(page_to_pfn(page)));
    13.9                      return 0;
   13.10                  }
   13.11                  else if ( (x & PGT_va_mask) == PGT_va_mutable )
   13.12 @@ -2206,7 +2206,7 @@ int do_mmu_update(
   13.13                  printk("privileged guest dom%d requests pfn=%lx to "
   13.14                         "map mfn=%lx for dom%d\n",
   13.15                         d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
   13.16 -                set_machinetophys(mfn, gpfn);
   13.17 +                set_pfn_from_mfn(mfn, gpfn);
   13.18                  set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
   13.19                  okay = 1;
   13.20                  shadow_unlock(FOREIGNDOM);
   13.21 @@ -2225,7 +2225,7 @@ int do_mmu_update(
   13.22                  break;
   13.23              }
   13.24  
   13.25 -            set_machinetophys(mfn, gpfn);
   13.26 +            set_pfn_from_mfn(mfn, gpfn);
   13.27              okay = 1;
   13.28  
   13.29              /*
    14.1 --- a/xen/arch/x86/shadow32.c	Tue Aug 30 20:02:59 2005 +0000
    14.2 +++ b/xen/arch/x86/shadow32.c	Tue Aug 30 20:03:51 2005 +0000
    14.3 @@ -827,7 +827,7 @@ alloc_p2m_table(struct domain *d)
    14.4      {
    14.5          page = list_entry(list_ent, struct pfn_info, list);
    14.6          mfn = page_to_pfn(page);
    14.7 -        pfn = machine_to_phys_mapping[mfn];
    14.8 +        pfn = get_pfn_from_mfn(mfn);
    14.9          ASSERT(pfn != INVALID_M2P_ENTRY);
   14.10          ASSERT(pfn < (1u<<20));
   14.11  
   14.12 @@ -841,7 +841,7 @@ alloc_p2m_table(struct domain *d)
   14.13      {
   14.14          page = list_entry(list_ent, struct pfn_info, list);
   14.15          mfn = page_to_pfn(page);
   14.16 -        pfn = machine_to_phys_mapping[mfn];
   14.17 +        pfn = get_pfn_from_mfn(mfn);
   14.18          if ( (pfn != INVALID_M2P_ENTRY) &&
   14.19               (pfn < (1u<<20)) )
   14.20          {
    15.1 --- a/xen/arch/x86/shadow_public.c	Tue Aug 30 20:02:59 2005 +0000
    15.2 +++ b/xen/arch/x86/shadow_public.c	Tue Aug 30 20:03:51 2005 +0000
    15.3 @@ -1311,7 +1311,7 @@ alloc_p2m_table(struct domain *d)
    15.4      {
    15.5          page = list_entry(list_ent, struct pfn_info, list);
    15.6          mfn = page_to_pfn(page);
    15.7 -        pfn = machine_to_phys_mapping[mfn];
    15.8 +        pfn = get_pfn_from_mfn(mfn);
    15.9          ASSERT(pfn != INVALID_M2P_ENTRY);
   15.10          ASSERT(pfn < (1u<<20));
   15.11  
   15.12 @@ -1325,7 +1325,7 @@ alloc_p2m_table(struct domain *d)
   15.13      {
   15.14          page = list_entry(list_ent, struct pfn_info, list);
   15.15          mfn = page_to_pfn(page);
   15.16 -        pfn = machine_to_phys_mapping[mfn];
   15.17 +        pfn = get_pfn_from_mfn(mfn);
   15.18          if ( (pfn != INVALID_M2P_ENTRY) &&
   15.19               (pfn < (1u<<20)) )
   15.20          {
    16.1 --- a/xen/arch/x86/traps.c	Tue Aug 30 20:02:59 2005 +0000
    16.2 +++ b/xen/arch/x86/traps.c	Tue Aug 30 20:03:51 2005 +0000
    16.3 @@ -100,6 +100,7 @@ unsigned long do_get_debugreg(int reg);
    16.4  
    16.5  static int debug_stack_lines = 20;
    16.6  integer_param("debug_stack_lines", debug_stack_lines);
    16.7 +#define stack_words_per_line (32 / BYTES_PER_LONG)
    16.8  
    16.9  int is_kernel_text(unsigned long addr)
   16.10  {
   16.11 @@ -125,7 +126,7 @@ void show_guest_stack(void)
   16.12  
   16.13      printk("Guest stack trace from "__OP"sp=%p:\n   ", stack);
   16.14  
   16.15 -    for ( i = 0; i < (debug_stack_lines*8); i++ )
   16.16 +    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
   16.17      {
   16.18          if ( ((long)stack & (STACK_SIZE-1)) == 0 )
   16.19              break;
   16.20 @@ -137,7 +138,7 @@ void show_guest_stack(void)
   16.21              i = 1;
   16.22              break;
   16.23          }
   16.24 -        if ( (i != 0) && ((i % 8) == 0) )
   16.25 +        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
   16.26              printk("\n   ");
   16.27          printk("%p ", _p(addr));
   16.28          stack++;
   16.29 @@ -176,11 +177,11 @@ void show_stack(unsigned long *esp)
   16.30  
   16.31      printk("Xen stack trace from "__OP"sp=%p:\n   ", stack);
   16.32  
   16.33 -    for ( i = 0; i < (debug_stack_lines*8); i++ )
   16.34 +    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
   16.35      {
   16.36          if ( ((long)stack & (STACK_SIZE-1)) == 0 )
   16.37              break;
   16.38 -        if ( (i != 0) && ((i % 8) == 0) )
   16.39 +        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
   16.40              printk("\n   ");
   16.41          addr = *stack++;
   16.42          printk("%p ", _p(addr));
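
Note: the arithmetic behind the new constant in traps.c: the dump now shows a fixed 32 bytes of stack per output line, so the line width stays roughly constant across builds.

    /* stack_words_per_line = 32 / BYTES_PER_LONG:
     *   32-bit Xen: BYTES_PER_LONG = 4  ->  8 words per line (the old literal)
     *   64-bit Xen: BYTES_PER_LONG = 8  ->  4 words per line
     * Previously the hard-coded 8 made 64-bit dumps twice as wide. */
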
    17.1 --- a/xen/arch/x86/vmx.c	Tue Aug 30 20:02:59 2005 +0000
    17.2 +++ b/xen/arch/x86/vmx.c	Tue Aug 30 20:03:51 2005 +0000
    17.3 @@ -694,7 +694,7 @@ vmx_copy(void *buf, unsigned long laddr,
    17.4          return 0;
    17.5      }
    17.6  
    17.7 -    mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
    17.8 +    mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
    17.9      addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
   17.10  
   17.11      if (dir == COPY_IN)
   17.12 @@ -795,7 +795,7 @@ vmx_world_restore(struct vcpu *d, struct
   17.13  	 * removed some translation or changed page attributes.
   17.14  	 * We simply invalidate the shadow.
   17.15  	 */
   17.16 -	mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
   17.17 +	mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
   17.18  	if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
   17.19  	    printk("Invalid CR3 value=%x", c->cr3);
   17.20  	    domain_crash_synchronous();
   17.21 @@ -813,7 +813,7 @@ vmx_world_restore(struct vcpu *d, struct
   17.22  	    domain_crash_synchronous(); 
   17.23  	    return 0;
   17.24  	}
   17.25 -	mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
   17.26 +	mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
   17.27  	d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   17.28  	update_pagetables(d);
   17.29  	/* 
   17.30 @@ -968,7 +968,7 @@ static int vmx_set_cr0(unsigned long val
   17.31          /*
   17.32           * The guest CR3 must be pointing to the guest physical.
   17.33           */
   17.34 -        if ( !VALID_MFN(mfn = phys_to_machine_mapping(
   17.35 +        if ( !VALID_MFN(mfn = get_mfn_from_pfn(
   17.36                              d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
   17.37               !get_page(pfn_to_page(mfn), d->domain) )
   17.38          {
   17.39 @@ -1164,7 +1164,7 @@ static int mov_to_cr(int gp, int cr, str
   17.40               * removed some translation or changed page attributes.
   17.41               * We simply invalidate the shadow.
   17.42               */
   17.43 -            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   17.44 +            mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
   17.45              if (mfn != pagetable_get_pfn(d->arch.guest_table))
   17.46                  __vmx_bug(regs);
   17.47              shadow_sync_all(d->domain);
   17.48 @@ -1175,7 +1175,7 @@ static int mov_to_cr(int gp, int cr, str
   17.49               */
   17.50              VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   17.51              if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
   17.52 -                 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
   17.53 +                 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
   17.54                   !get_page(pfn_to_page(mfn), d->domain) )
   17.55              {
   17.56                  printk("Invalid CR3 value=%lx", value);
    18.1 --- a/xen/arch/x86/vmx_platform.c	Tue Aug 30 20:02:59 2005 +0000
    18.2 +++ b/xen/arch/x86/vmx_platform.c	Tue Aug 30 20:03:51 2005 +0000
    18.3 @@ -521,7 +521,7 @@ int inst_copy_from_guest(unsigned char *
    18.4      if ( vmx_paging_enabled(current) )
    18.5      {
    18.6          gpa = gva_to_gpa(guest_eip);
    18.7 -        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
    18.8 +        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
    18.9  
   18.10          /* Does this cross a page boundary ? */
   18.11          if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
   18.12 @@ -532,7 +532,7 @@ int inst_copy_from_guest(unsigned char *
   18.13      }
   18.14      else
   18.15      {
   18.16 -        mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
   18.17 +        mfn = get_mfn_from_pfn(guest_eip >> PAGE_SHIFT);
   18.18      }
   18.19  
   18.20      inst_start = map_domain_page(mfn);
   18.21 @@ -542,7 +542,7 @@ int inst_copy_from_guest(unsigned char *
   18.22      if ( remaining )
   18.23      {
   18.24          gpa = gva_to_gpa(guest_eip+inst_len+remaining);
   18.25 -        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
   18.26 +        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
   18.27  
   18.28          inst_start = map_domain_page(mfn);
   18.29          memcpy((char *)buf+inst_len, inst_start, remaining);
    19.1 --- a/xen/arch/x86/vmx_vmcs.c	Tue Aug 30 20:02:59 2005 +0000
    19.2 +++ b/xen/arch/x86/vmx_vmcs.c	Tue Aug 30 20:03:51 2005 +0000
    19.3 @@ -148,7 +148,7 @@ int vmx_setup_platform(struct vcpu *d, s
    19.4      offset = (addr & ~PAGE_MASK);
    19.5      addr = round_pgdown(addr);
    19.6  
    19.7 -    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
    19.8 +    mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT);
    19.9      p = map_domain_page(mpfn);
   19.10  
   19.11      e820p = (struct e820entry *) ((unsigned long) p + offset); 
   19.12 @@ -175,7 +175,7 @@ int vmx_setup_platform(struct vcpu *d, s
   19.13      unmap_domain_page(p);        
   19.14  
   19.15      /* Initialise shared page */
   19.16 -    mpfn = phys_to_machine_mapping(gpfn);
   19.17 +    mpfn = get_mfn_from_pfn(gpfn);
   19.18      p = map_domain_page(mpfn);
   19.19      d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
   19.20  
    20.1 --- a/xen/arch/x86/x86_64/entry.S	Tue Aug 30 20:02:59 2005 +0000
    20.2 +++ b/xen/arch/x86/x86_64/entry.S	Tue Aug 30 20:03:51 2005 +0000
    20.3 @@ -339,7 +339,8 @@ create_bounce_frame:
    20.4  1:      /* In kernel context already: push new frame at existing %rsp. */
    20.5          movq  UREGS_rsp+8(%rsp),%rsi
    20.6          andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
    20.7 -2:      movq  $HYPERVISOR_VIRT_START,%rax
    20.8 +2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
    20.9 +        movq  $HYPERVISOR_VIRT_START,%rax
   20.10          cmpq  %rax,%rsi
   20.11          jb    1f                        # In +ve address space? Then okay.
   20.12          movq  $HYPERVISOR_VIRT_END+60,%rax
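
Note: the one functional change in entry.S aligns the guest stack pointer before the bounce frame is built. A C model of the new instruction, assuming the motivation is the x86-64 ABI's 16-byte stack-alignment requirement:

    #include <stdint.h>

    /* Model of "andq $~0xf,%rsi" in create_bounce_frame: round the guest
     * %rsp down to a 16-byte boundary before pushing the exception frame. */
    static inline uint64_t align_stack_16(uint64_t rsp)
    {
            return rsp & ~(uint64_t)0xf;
    }
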
    21.1 --- a/xen/arch/x86/x86_64/traps.c	Tue Aug 30 20:02:59 2005 +0000
    21.2 +++ b/xen/arch/x86/x86_64/traps.c	Tue Aug 30 20:03:51 2005 +0000
    21.3 @@ -15,19 +15,22 @@
    21.4  
    21.5  void show_registers(struct cpu_user_regs *regs)
    21.6  {
    21.7 -    printk("CPU:    %d\nEIP:    %04x:[<%016lx>]",
    21.8 +    printk("CPU:    %d\nRIP:    %04x:[<%016lx>]",
    21.9             smp_processor_id(), 0xffff & regs->cs, regs->rip);
   21.10      if ( !GUEST_MODE(regs) )
   21.11          print_symbol(" %s", regs->rip);
   21.12 -    printk("\nEFLAGS: %016lx\n", regs->eflags);
   21.13 -    printk("rax: %016lx   rbx: %016lx   rcx: %016lx   rdx: %016lx\n",
   21.14 -           regs->rax, regs->rbx, regs->rcx, regs->rdx);
   21.15 -    printk("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
   21.16 -           regs->rsi, regs->rdi, regs->rbp, regs->rsp);
   21.17 -    printk("r8:  %016lx   r9:  %016lx   r10: %016lx   r11: %016lx\n",
   21.18 -           regs->r8,  regs->r9,  regs->r10, regs->r11);
   21.19 -    printk("r12: %016lx   r13: %016lx   r14: %016lx   r15: %016lx\n",
   21.20 -           regs->r12, regs->r13, regs->r14, regs->r15);
   21.21 +    printk("\nRFLAGS: %016lx\n", regs->eflags);
   21.22 +    printk("rax: %016lx   rbx: %016lx   rcx: %016lx\n",
   21.23 +           regs->rax, regs->rbx, regs->rcx);
   21.24 +    printk("rdx: %016lx   rsi: %016lx   rdi: %016lx\n",
   21.25 +           regs->rdx, regs->rsi, regs->rdi);
   21.26 +    printk("rbp: %016lx   rsp: %016lx   r8:  %016lx\n",
   21.27 +           regs->rbp, regs->rsp, regs->r8);
   21.28 +    printk("r9:  %016lx   r10: %016lx   r11: %016lx\n",
   21.29 +           regs->r9,  regs->r10, regs->r11);
   21.30 +    printk("r12: %016lx   r13: %016lx   r14: %016lx\n",
   21.31 +           regs->r12, regs->r13, regs->r14);
   21.32 +    printk("r15: %016lx\n", regs->r15);
   21.33  
   21.34      if ( GUEST_MODE(regs) )
   21.35          show_guest_stack();
    23.1 --- a/xen/common/grant_table.c	Tue Aug 30 20:02:59 2005 +0000
    23.2 +++ b/xen/common/grant_table.c	Tue Aug 30 20:03:51 2005 +0000
    23.3 @@ -1211,13 +1211,13 @@ gnttab_notify_transfer(
    23.4          DPRINTK("Bad pfn (%lx)\n", pfn);
    23.5      else
    23.6      {
    23.7 -        machine_to_phys_mapping[frame] = pfn;
    23.8 +        set_pfn_from_mfn(frame, pfn);
    23.9  
   23.10          if ( unlikely(shadow_mode_log_dirty(ld)))
   23.11               mark_dirty(ld, frame);
   23.12  
   23.13          if (shadow_mode_translate(ld))
   23.14 -            __phys_to_machine_mapping[pfn] = frame;
   23.15 +            set_mfn_from_pfn(pfn, frame);
   23.16      }
   23.17      sha->frame = __mfn_to_gpfn(rd, frame);
   23.18      sha->domid = rd->domain_id;
   23.19 @@ -1268,8 +1268,7 @@ grant_table_create(
   23.20      {
   23.21          SHARE_PFN_WITH_DOMAIN(
   23.22              virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
   23.23 -        machine_to_phys_mapping[(virt_to_phys(t->shared) >> PAGE_SHIFT) + i] =
   23.24 -            INVALID_M2P_ENTRY;
   23.25 +        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
   23.26      }
   23.27  
   23.28      /* Okay, install the structure. */
    24.1 --- a/xen/include/asm-ia64/mm.h	Tue Aug 30 20:02:59 2005 +0000
    24.2 +++ b/xen/include/asm-ia64/mm.h	Tue Aug 30 20:03:51 2005 +0000
    24.3 @@ -405,7 +405,7 @@ extern unsigned long *mpt_table;
    24.4  /* If pmt table is provided by control pannel later, we need __get_user
    24.5  * here. However if it's allocated by HV, we should access it directly
    24.6  */
    24.7 -#define phys_to_machine_mapping(d, gpfn)			\
    24.8 +#define get_mfn_from_pfn(d, gpfn)			\
    24.9      ((d) == dom0 ? gpfn : 					\
   24.10  	(gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] :	\
   24.11  		INVALID_MFN))
   24.12 @@ -414,7 +414,7 @@ extern unsigned long *mpt_table;
   24.13      machine_to_phys_mapping[(mfn)]
   24.14  
   24.15  #define __gpfn_to_mfn(_d, gpfn)			\
   24.16 -    phys_to_machine_mapping((_d), (gpfn))
   24.17 +    get_mfn_from_pfn((_d), (gpfn))
   24.18  
   24.19  #define __gpfn_invalid(_d, gpfn)			\
   24.20  	(__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
    25.1 --- a/xen/include/asm-x86/mm.h	Tue Aug 30 20:02:59 2005 +0000
    25.2 +++ b/xen/include/asm-x86/mm.h	Tue Aug 30 20:03:51 2005 +0000
    25.3 @@ -255,28 +255,31 @@ int check_descriptor(struct desc_struct 
    25.4   * contiguous (or near contiguous) physical memory.
    25.5   */
    25.6  #undef  machine_to_phys_mapping
    25.7 -#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
    25.8 +#define machine_to_phys_mapping  ((u32 *)RDWR_MPT_VIRT_START)
    25.9  #define INVALID_M2P_ENTRY        (~0U)
   25.10  #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
   25.11  #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
   25.12  
   25.13 +#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
   25.14 +#define get_pfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
   25.15 +
   25.16  /*
   25.17   * The phys_to_machine_mapping is the reversed mapping of MPT for full
   25.18   * virtualization.  It is only used by shadow_mode_translate()==true
   25.19   * guests, so we steal the address space that would have normally
   25.20   * been used by the read-only MPT map.
   25.21   */
   25.22 -#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
   25.23 -#define INVALID_MFN               (~0UL)
   25.24 -#define VALID_MFN(_mfn)           (!((_mfn) & (1U<<31)))
   25.25 +#define phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
   25.26 +#define INVALID_MFN             (~0UL)
   25.27 +#define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
   25.28  
   25.29 -/* Returns the machine physical */
   25.30 -static inline unsigned long phys_to_machine_mapping(unsigned long pfn) 
   25.31 +#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn))
   25.32 +static inline unsigned long get_mfn_from_pfn(unsigned long pfn) 
   25.33  {
   25.34      unsigned long mfn;
   25.35      l1_pgentry_t pte;
   25.36  
   25.37 -    if ( (__copy_from_user(&pte, &__phys_to_machine_mapping[pfn],
   25.38 +    if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn],
   25.39                             sizeof(pte)) == 0) &&
   25.40           (l1e_get_flags(pte) & _PAGE_PRESENT) )
   25.41  	mfn = l1e_get_pfn(pte);
   25.42 @@ -285,7 +288,6 @@ static inline unsigned long phys_to_mach
   25.43      
   25.44      return mfn; 
   25.45  }
   25.46 -#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
   25.47  
   25.48  #ifdef MEMORY_GUARD
   25.49  void memguard_init(void);
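
Note: this asm-x86/mm.h hunk defines the renaming used throughout the rest of the changeset: raw indexing of the M2P and P2M tables gives way to named accessors, and the old phys_to_machine_mapping() function becomes get_mfn_from_pfn() (the ia64 variant above keeps an extra domain argument). In summary:

    /* Accessors introduced here:
     *   set_pfn_from_mfn(mfn, pfn)  -- write machine_to_phys_mapping[mfn]
     *   get_pfn_from_mfn(mfn)       -- read  machine_to_phys_mapping[mfn]
     *   set_mfn_from_pfn(pfn, mfn)  -- write phys_to_machine_mapping[pfn]
     *                                  (shadow_mode_translate() guests only)
     *   get_mfn_from_pfn(pfn)       -- guarded read: INVALID_MFN unless the
     *                                  backing PTE is present
     * Typical call-site rewrites seen throughout the patch: */
    set_pfn_from_mfn(mfn, pfn);    /* was: machine_to_phys_mapping[mfn] = pfn;  */
    mfn = get_mfn_from_pfn(gpfn);  /* was: mfn = phys_to_machine_mapping(gpfn); */
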
    26.1 --- a/xen/include/asm-x86/shadow.h	Tue Aug 30 20:02:59 2005 +0000
    26.2 +++ b/xen/include/asm-x86/shadow.h	Tue Aug 30 20:03:51 2005 +0000
    26.3 @@ -269,14 +269,14 @@ static inline void shadow_mode_disable(s
    26.4  
    26.5  #define __mfn_to_gpfn(_d, mfn)                         \
    26.6      ( (shadow_mode_translate(_d))                      \
    26.7 -      ? machine_to_phys_mapping[(mfn)]                 \
    26.8 +      ? get_pfn_from_mfn(mfn)                                   \
    26.9        : (mfn) )
   26.10  
   26.11  #define __gpfn_to_mfn(_d, gpfn)                        \
   26.12      ({                                                 \
   26.13          ASSERT(current->domain == (_d));               \
   26.14          (shadow_mode_translate(_d))                    \
   26.15 -        ? phys_to_machine_mapping(gpfn)                \
   26.16 +        ? get_mfn_from_pfn(gpfn)                \
   26.17          : (gpfn);                                      \
   26.18      })
   26.19  
   26.20 @@ -461,7 +461,7 @@ static inline int __mark_dirty(struct do
   26.21      // This wants the nice compact set of PFNs from 0..domain's max,
   26.22      // which __mfn_to_gpfn() only returns for translated domains.
   26.23      //
   26.24 -    pfn = machine_to_phys_mapping[mfn];
   26.25 +    pfn = get_pfn_from_mfn(mfn);
   26.26  
   26.27      /*
   26.28       * Values with the MSB set denote MFNs that aren't really part of the 
   26.29 @@ -562,7 +562,7 @@ update_hl2e(struct vcpu *v, unsigned lon
   26.30      old_hl2e = v->arch.hl2_vtable[index];
   26.31  
   26.32      if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
   26.33 -         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
   26.34 +         VALID_MFN(mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e))) )
   26.35          new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
   26.36      else
   26.37          new_hl2e = l1e_empty();
    27.1 --- a/xen/include/asm-x86/shadow_64.h	Tue Aug 30 20:02:59 2005 +0000
    27.2 +++ b/xen/include/asm-x86/shadow_64.h	Tue Aug 30 20:03:51 2005 +0000
    27.3 @@ -138,7 +138,7 @@ static inline pgentry_64_t *__entry(
    27.4              return NULL;
    27.5          mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
    27.6          if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
    27.7 -            mfn = phys_to_machine_mapping(mfn);
    27.8 +            mfn = get_mfn_from_pfn(mfn);
    27.9          le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
   27.10          index = table_offset_64(va, (level + i - 1));
   27.11          le_e = &le_p[index];
   27.12 @@ -257,7 +257,7 @@ static inline void *  __guest_set_l1e(
   27.13                  if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
   27.14                      return NULL;
   27.15  
   27.16 -                l1mfn = phys_to_machine_mapping(
   27.17 +                l1mfn = get_mfn_from_pfn(
   27.18                    l2e_get_pfn(gl2e));
   27.19  
   27.20                  l1va = (l1_pgentry_32_t *)
   27.21 @@ -299,7 +299,7 @@ static inline void *  __guest_get_l1e(
   27.22                      return NULL;
   27.23  
   27.24  
   27.25 -                l1mfn = phys_to_machine_mapping(
   27.26 +                l1mfn = get_mfn_from_pfn(
   27.27                    l2e_get_pfn(gl2e));
   27.28                  l1va = (l1_pgentry_32_t *) phys_to_virt(
   27.29                    l1mfn << L1_PAGETABLE_SHIFT);
    28.1 --- a/xen/include/asm-x86/vmx_platform.h	Tue Aug 30 20:02:59 2005 +0000
    28.2 +++ b/xen/include/asm-x86/vmx_platform.h	Tue Aug 30 20:03:51 2005 +0000
    28.3 @@ -91,6 +91,6 @@ extern int vmx_setup_platform(struct vcp
    28.4  extern void vmx_io_assist(struct vcpu *v);
    28.5  
    28.6  // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
    28.7 -#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
    28.8 +#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_pfn((gpa) >> PAGE_SHIFT)))
    28.9  
   28.10  #endif
    29.1 --- a/xen/include/xen/perfc.h	Tue Aug 30 20:02:59 2005 +0000
    29.2 +++ b/xen/include/xen/perfc.h	Tue Aug 30 20:03:51 2005 +0000
    29.3 @@ -4,6 +4,7 @@
    29.4  
    29.5  #ifdef PERF_COUNTERS
    29.6  
    29.7 +#include <xen/lib.h>
    29.8  #include <asm/atomic.h>
    29.9  
   29.10  /* 
   29.11 @@ -87,7 +88,7 @@ extern struct perfcounter perfcounters;
   29.12   * Histogram: special treatment for 0 and 1 count. After that equally spaced 
   29.13   * with last bucket taking the rest.
   29.14   */
   29.15 -#ifdef PERFC_ARRAYS
   29.16 +#ifdef PERF_ARRAYS
   29.17  #define perfc_incr_histo(_x,_v,_n)                                          \
   29.18      do {                                                                    \
   29.19          if ( (_v) == 0 )                                                    \
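
Note: the perfc.h hunk pulls in <xen/lib.h> and corrects the guard around the histogram helpers: they were compiled under PERFC_ARRAYS, apparently a typo for the PERF_ARRAYS symbol used elsewhere, so perfc_incr_histo() was never built. Sketch of the corrected guard (the no-op fallback is an assumption, not shown in this hunk):

    #ifdef PERF_ARRAYS                        /* was misspelled PERFC_ARRAYS */
    #define perfc_incr_histo(_x,_v,_n)        /* ...histogram update as above... */
    #else
    #define perfc_incr_histo(_x,_v,_n) ((void)0)
    #endif
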