ia64/xen-unstable

changeset 4647:f9f8f250228f

bitkeeper revision 1.1375 (426a2efcnNwCi2J-ptewqWF_62U1-g)

xenpmap.h, hypervisor.h, xen_machdep.c, pmap.c, machdep.c:
Writable pagetables for FreeBSD.
author kaf24@firebug.cl.cam.ac.uk
date Sat Apr 23 11:18:20 2005 +0000 (2005-04-23)
parents 7317dcb7ef66
children 5758f7910e00
files freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h
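
This changeset enables Xen's writable-pagetables assist (VMASST_TYPE_writable_pagetables) for the FreeBSD 5.3 i386 port. Alongside that, it replaces the old self-referential PTDPTDI mapping of the page directory with a separate "shadow" page that is pinned as an L1 page table and kept in step with the real page directory: the new pd_set() helper, and the PD_SET_VA* macros that now wrap it, write each page-directory update twice, the full value into the real PDE and the same value with PG_RW and PG_M stripped into the shadow (after the phys-to-machine translation where applicable). The following toy userland model of that invariant is illustrative only and not part of the commit; the PG_* values are the usual i386 bits.

    /* Toy model of the shadow-page-directory invariant maintained by pd_set():
     * every page-directory write is mirrored into an L1 "shadow" with PG_RW
     * and PG_M cleared.  Hypothetical, self-contained userland code. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PG_V   0x001
    #define PG_RW  0x002
    #define PG_A   0x020
    #define PG_M   0x040
    #define NPDEPG 1024

    static uint32_t pdir[NPDEPG];    /* stands in for pmap->pm_pdir (the L2) */
    static uint32_t shadow[NPDEPG];  /* stands in for the pinned L1 shadow   */

    /* Mirrors the SH_PD_SET_VA_MA case of pd_set(). */
    static void toy_pd_set(int idx, uint32_t val)
    {
        shadow[idx] = val & ~(PG_RW | PG_M);
        pdir[idx]   = val;
    }

    int main(void)
    {
        toy_pd_set(5, 0x12345000 | PG_V | PG_A | PG_RW | PG_M);
        assert((shadow[5] & (PG_RW | PG_M)) == 0);
        assert((pdir[5] & ~(PG_RW | PG_M)) == shadow[5]);
        printf("pde %08x shadow %08x\n", (unsigned)pdir[5], (unsigned)shadow[5]);
        return 0;
    }
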
line diff
     1.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Sat Apr 23 11:10:11 2005 +0000
     1.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Sat Apr 23 11:18:20 2005 +0000
     1.3 @@ -1374,7 +1374,6 @@ extern unsigned long cpu0prvpage;
     1.4  extern unsigned long *SMPpt;
     1.5  pteinfo_t *pteinfo_list;
     1.6  unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
     1.7 -pt_entry_t *KPTphysv;
     1.8  int preemptable;
     1.9  int gdt_set;
    1.10  
    1.11 @@ -1386,8 +1385,9 @@ void
    1.12  initvalues(start_info_t *startinfo)
    1.13  { 
    1.14      int i;
    1.15 +    vm_paddr_t pdir_shadow_ma, KPTphys;
    1.16  #ifdef WRITABLE_PAGETABLES
    1.17 -    XENPRINTF("using writable pagetables\n");
    1.18 +    printk("using writable pagetables\n");
    1.19      HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
    1.20  #endif
    1.21  
    1.22 @@ -1398,18 +1398,17 @@ initvalues(start_info_t *startinfo)
    1.23      /* pre-zero unused mapped pages */
    1.24      bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE); 
    1.25      IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
    1.26 -    KPTphysv = (pt_entry_t *)(startinfo->pt_base + PAGE_SIZE);
    1.27 +    KPTphys = xpmap_ptom(__pa(startinfo->pt_base + PAGE_SIZE));
    1.28      XENPRINTF("IdlePTD %p\n", IdlePTD);
    1.29      XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
    1.30  	      "mod_start: 0x%lx mod_len: 0x%lx\n",
    1.31  	      xen_start_info->nr_pages, xen_start_info->shared_info, 
    1.32  	      xen_start_info->flags, xen_start_info->pt_base, 
    1.33  	      xen_start_info->mod_start, xen_start_info->mod_len);
    1.34 -    
    1.35 -    /* setup self-referential mapping first so vtomach will work */
    1.36 -    xen_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD | 
    1.37 -			PG_V | PG_A);
    1.38 -    xen_flush_queue();
    1.39 +
    1.40 +
    1.41 +
    1.42 +
    1.43      /* Map proc0's UPAGES */
    1.44      proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    1.45      tmpindex += UAREA_PAGES;
    1.46 @@ -1439,9 +1438,11 @@ initvalues(start_info_t *startinfo)
    1.47      /* map SMP page table RO */
    1.48      PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW);
    1.49  
    1.50 -    /* put the page table into the pde */
    1.51 -    xen_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
    1.52 -
    1.53 +    /* put the page table into the page directory */
    1.54 +    xen_queue_pt_update((vm_paddr_t)(IdlePTD + MPPTDI), 
    1.55 +			xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
    1.56 +    xen_queue_pt_update(pdir_shadow_ma + MPPTDI*sizeof(vm_paddr_t), 
    1.57 +			xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_V | PG_A);
    1.58      tmpindex++;
    1.59  #endif
    1.60  
    1.61 @@ -1454,16 +1455,34 @@ initvalues(start_info_t *startinfo)
    1.62  #endif
    1.63      /* unmap remaining pages from initial 4MB chunk */
    1.64      for (i = tmpindex; i%1024 != 0; i++) 
    1.65 -	PT_CLEAR_VA(KPTphysv + i, TRUE);
    1.66 +	xen_queue_pt_update(KPTphys + i*sizeof(vm_paddr_t), 0);
    1.67 +    xen_flush_queue();
    1.68 +    
    1.69 +    pdir_shadow_ma = xpmap_ptom(tmpindex << PAGE_SHIFT);
    1.70 +    tmpindex++;
    1.71 +
    1.72 +    /* setup shadow mapping first so vtomach will work */
    1.73 +    xen_pt_pin((vm_paddr_t)pdir_shadow_ma);
    1.74 +    xen_queue_pt_update((vm_paddr_t)(IdlePTD + PTDPTDI), 
    1.75 +			pdir_shadow_ma | PG_V | PG_A | PG_RW | PG_M);
    1.76 +    xen_queue_pt_update(pdir_shadow_ma + PTDPTDI*sizeof(vm_paddr_t), 
    1.77 +			((vm_paddr_t)IdlePTD) | PG_V | PG_A);
    1.78 +    xen_queue_pt_update(pdir_shadow_ma + KPTDI*sizeof(vm_paddr_t), 
    1.79 +			KPTphys | PG_V | PG_A);
    1.80  
    1.81      /* allocate remainder of NKPT pages */
    1.82 -    for (i = 0; i < NKPT-1; i++, tmpindex++)
    1.83 -	PD_SET_VA(((unsigned long *)startinfo->pt_base) + KPTDI + i + 1, (tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A, TRUE);
    1.84 +    for (i = 0; i < NKPT-1; i++, tmpindex++) {
    1.85 +	xen_queue_pt_update((vm_paddr_t)(IdlePTD + KPTDI + i + 1), 
    1.86 +			    xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A));
    1.87 +	xen_queue_pt_update(pdir_shadow_ma + (KPTDI + i + 1)*sizeof(vm_paddr_t), 
    1.88 +			    xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_V | PG_A));
    1.89 +    }
    1.90      tmpindex += NKPT-1;
    1.91      PT_UPDATES_FLUSH();
    1.92  
    1.93      HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    1.94 -    PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
    1.95 +    PT_SET_MA(HYPERVISOR_shared_info, 
    1.96 +	      xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
    1.97      tmpindex++;
    1.98  
    1.99      HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
   1.100 @@ -1572,10 +1591,9 @@ init386(void)
   1.101  
   1.102  	PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
   1.103  	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
   1.104 -	if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
   1.105 -	    XENPRINTF("set_gdt failed\n");
   1.106 -
   1.107 -	}
   1.108 +	if ((error = HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1))) 
   1.109 +	    panic("set_gdt failed");
   1.110 +	
   1.111  	lgdt_finish();
   1.112  	gdt_set = 1;
   1.113  
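
The machdep.c hunks drop the old self-referential mapping (IdlePTD[PTDPTDI] pointing back at IdlePTD) from initvalues() and instead take one page from the initial 4MB chunk as the boot-time shadow directory: it is pinned with xen_pt_pin(), installed writable at PTDPTDI of the real page directory, and given read-only entries pointing back at IdlePTD and at the first kernel page table (KPTphys); the NKPT and SMP hunks likewise update both copies. Everything goes through the queued interface, xen_queue_pt_update() followed by xen_flush_queue(). (In the SMP-only hunk the shadow update at MPPTDI appears to use pdir_shadow_ma before it is assigned a few lines further down.) A condensed sketch of the core sequence, with the tmpindex bookkeeping and the NKPT loop left out; names follow the hunk above and the i386-xen headers are assumed:

    static void
    shadow_pdir_boot_sketch(vm_paddr_t pdir_shadow_ma, pd_entry_t *IdlePTD,
        vm_paddr_t KPTphys)
    {
        /* Pin the shadow page as an L1 page table. */
        xen_pt_pin(pdir_shadow_ma);

        /* Real page directory: the recursive slot now points at the shadow. */
        xen_queue_pt_update((vm_paddr_t)(IdlePTD + PTDPTDI),
            pdir_shadow_ma | PG_V | PG_A | PG_RW | PG_M);

        /* Shadow: map the directory and the first kernel page table read-only. */
        xen_queue_pt_update(pdir_shadow_ma + PTDPTDI * sizeof(vm_paddr_t),
            (vm_paddr_t)IdlePTD | PG_V | PG_A);
        xen_queue_pt_update(pdir_shadow_ma + KPTDI * sizeof(vm_paddr_t),
            KPTphys | PG_V | PG_A);

        xen_flush_queue();
    }
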
     2.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Sat Apr 23 11:10:11 2005 +0000
     2.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Sat Apr 23 11:18:20 2005 +0000
     2.3 @@ -273,6 +273,7 @@ static int pmap_remove_pte(pmap_t pmap, 
     2.4  static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
     2.5  static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
     2.6  					vm_offset_t va);
     2.7 +static void pmap_copy_ma(vm_paddr_t src, vm_paddr_t dst);
     2.8  static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
     2.9  
    2.10  static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
    2.11 @@ -300,6 +301,32 @@ static void pmap_mark_unprivileged(unsig
    2.12  static void pmap_dec_ref_page(vm_page_t m);
    2.13  int pmap_pid_dump(int pid);
    2.14  #endif
    2.15 +
    2.16 +void 
    2.17 +pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type)
    2.18 +{
    2.19 +	vm_paddr_t shadow_pdir_ma = pmap->pm_pdir[PTDPTDI] & ~0xFFF;
    2.20 +	vm_paddr_t shadow_offset = (vm_paddr_t)(ptr - pmap->pm_pdir)*sizeof(vm_paddr_t);
    2.21 +	
    2.22 +	switch (type) {
    2.23 +	case SH_PD_SET_VA:
    2.24 +		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
    2.25 +				    xpmap_ptom(val & ~(PG_RW|PG_M)));
    2.26 +		xen_queue_pt_update(vtomach(ptr),
    2.27 +				    xpmap_ptom(val)); 	
    2.28 +		break;
    2.29 +	case SH_PD_SET_VA_MA:
    2.30 +		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
    2.31 +				    val & ~(PG_RW|PG_M));
    2.32 +		xen_queue_pt_update(vtomach(ptr), val); 	
    2.33 +		break;
    2.34 +	case SH_PD_SET_VA_CLEAR:
    2.35 +		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 0);
    2.36 +		xen_queue_pt_update(vtomach(ptr), 0); 	
    2.37 +		break;
    2.38 +	}
    2.39 +}
    2.40 +
    2.41  /*
    2.42   * Move the kernel virtual free pointer to the next
    2.43   * 4MB.  This is used to help improve performance
    2.44 @@ -335,7 +362,6 @@ pmap_bootstrap(firstaddr, loadaddr)
    2.45  {
    2.46  	vm_offset_t va;
    2.47  	pt_entry_t *pte, *unused;
    2.48 -	int i;
    2.49  
    2.50  	/*
    2.51  	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
    2.52 @@ -416,8 +442,6 @@ pmap_bootstrap(firstaddr, loadaddr)
    2.53  	PT_CLEAR_VA(CMAP1, FALSE);
    2.54  	PT_CLEAR_VA(CMAP2, FALSE);
    2.55  
    2.56 -	for (i = 0; i < NKPT; i++)
    2.57 -		PD_CLEAR_VA(&PTD[i], FALSE);
    2.58  	PT_UPDATES_FLUSH();
    2.59  #ifdef XEN_UNNEEDED
    2.60  	/* Turn on PG_G on kernel page(s) */
    2.61 @@ -767,7 +791,7 @@ pmap_invalidate_all(pmap_t pmap)
    2.62  static __inline int
    2.63  pmap_is_current(pmap_t pmap)
    2.64  {
    2.65 -
    2.66 +    /* XXX validate */
    2.67  	return (pmap == kernel_pmap ||
    2.68  		(pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
    2.69  	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
    2.70 @@ -794,7 +818,7 @@ pmap_pte(pmap_t pmap, vm_offset_t va)
    2.71  		newpf = PT_GET(pde) & PG_FRAME;
    2.72  		tmppf = PT_GET(PMAP2) & PG_FRAME;
    2.73  		if (tmppf != newpf) {
    2.74 -			PD_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
    2.75 +		        PT_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
    2.76  			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
    2.77  		}
    2.78  		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
    2.79 @@ -853,7 +877,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t 
    2.80  		newpf = PT_GET(pde) & PG_FRAME;
    2.81  		tmppf = PT_GET(PMAP1) & PG_FRAME;
    2.82  		if (tmppf != newpf) {
    2.83 -			PD_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
    2.84 +			PT_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
    2.85  #ifdef SMP
    2.86  			PMAP1cpu = PCPU_GET(cpuid);
    2.87  #endif
    2.88 @@ -1088,7 +1112,7 @@ static int
    2.89  	 * unmap the page table page
    2.90  	 */
    2.91  	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
    2.92 -	PD_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
    2.93 +	PD_CLEAR_VA(pmap, &pmap->pm_pdir[m->pindex], TRUE);
    2.94  	--pmap->pm_stats.resident_count;
    2.95  
    2.96  	/*
    2.97 @@ -1146,8 +1170,8 @@ pmap_pinit0(pmap)
    2.98  void
    2.99  pmap_pinit(struct pmap *pmap)
   2.100  {
   2.101 -	vm_page_t m, ptdpg[NPGPTD];
   2.102 -	vm_paddr_t ma;
   2.103 +	vm_page_t m, ptdpg[NPGPTD*2];
   2.104 +	vm_paddr_t ma, ma_shadow;
   2.105  	static int color;
   2.106  	int i;
   2.107  
   2.108 @@ -1173,7 +1197,7 @@ pmap_pinit(struct pmap *pmap)
   2.109  	/*
   2.110  	 * allocate the page directory page(s)
   2.111  	 */
   2.112 -	for (i = 0; i < NPGPTD;) {
   2.113 +	for (i = 0; i < NPGPTD*2;) {
   2.114  		m = vm_page_alloc(NULL, color++,
   2.115  		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
   2.116  		    VM_ALLOC_ZERO);
   2.117 @@ -1184,36 +1208,51 @@ pmap_pinit(struct pmap *pmap)
   2.118  			ptdpg[i++] = m;
   2.119  		}
   2.120  	}
   2.121 +#ifdef PAE
   2.122 +	#error "missing shadow handling for PAE"
   2.123 +#endif
   2.124  
   2.125  	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
   2.126  
   2.127 -	for (i = 0; i < NPGPTD; i++) {
   2.128 -		if ((ptdpg[i]->flags & PG_ZERO) == 0)
   2.129 -			bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
   2.130 -	}
   2.131 -
   2.132  	mtx_lock_spin(&allpmaps_lock);
   2.133  	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
   2.134  	mtx_unlock_spin(&allpmaps_lock);
   2.135  	/* Wire in kernel global address entries. */
   2.136  	/* XXX copies current process, does not fill in MPPTDI */
   2.137 -	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
   2.138 +	bcopy(kernel_pmap->pm_pdir + KPTDI, pmap->pm_pdir + KPTDI, 
   2.139 +	      nkpt * sizeof(pd_entry_t));
   2.140 +	/* XXX need to copy global address entries to page directory's L1 shadow */
   2.141 +	ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD]));
   2.142 +	/* L1 pin shadow page director{y,ies} */
   2.143 +	for (i = 0; i < NPGPTD; i++) {
   2.144 +		ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD + i]));
   2.145 +	        pmap_copy_ma(kernel_pmap->pm_pdir[PTDPTDI + i] & ~(PG_RW|PG_M), ma);
   2.146 +		xen_pt_pin(ma);
   2.147 +	}
   2.148 +
   2.149  #ifdef SMP
   2.150 -	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
   2.151 +	pmap->pm_pdir[MPPTDI] = kernel_pmap->pm_pdir[MPPTDI];
   2.152  #endif
   2.153  
   2.154 -	/* install self-referential address mapping entry(s) */
   2.155 +	/* pin and install L1 shadow */
   2.156  	for (i = 0; i < NPGPTD; i++) {
   2.157  		ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
   2.158 -		pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A | PG_M;
   2.159 +		ma_shadow = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD+i]));
   2.160 +		/* re-map page directory read-only and pin */
   2.161 +		PT_SET_MA(pmap->pm_pdir + i*PAGE_SIZE, ma | PG_V | PG_A);
   2.162 +		xen_pgd_pin(ma);
   2.163 +		/* add L1 shadow of L2 */
   2.164 +		xen_queue_pt_update(vtomach(&pmap->pm_pdir[PTDPTDI + i]), 
   2.165 +				    ma_shadow | PG_V | PG_A); 
   2.166 +		xen_queue_pt_update(ma_shadow + PTDPTDI*sizeof(vm_paddr_t), 
   2.167 +				    vtomach(pmap->pm_pdir) | PG_V | PG_A);
   2.168 +
   2.169  #ifdef PAE
   2.170 +		#error "unsupported currently"
   2.171  		pmap->pm_pdpt[i] = ma | PG_V;
   2.172  #endif
   2.173 -		/* re-map page directory read-only */
   2.174 -		PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
   2.175 -		xen_pgd_pin(ma);
   2.176 -
   2.177  	}
   2.178 +	xen_flush_queue();
   2.179  
   2.180  	pmap->pm_active = 0;
   2.181  	TAILQ_INIT(&pmap->pm_pvlist);
   2.182 @@ -1265,8 +1304,8 @@ static vm_page_t
   2.183  
   2.184  	ptepa = VM_PAGE_TO_PHYS(m);
   2.185  	xen_pt_pin(xpmap_ptom(ptepa));
   2.186 -	PD_SET_VA(&pmap->pm_pdir[ptepindex], 
   2.187 -		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
   2.188 +	PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex], 
   2.189 +		(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
   2.190  
   2.191  	return m;
   2.192  }
   2.193 @@ -1423,7 +1462,7 @@ pmap_lazyfix(pmap_t pmap)
   2.194  void
   2.195  pmap_release(pmap_t pmap)
   2.196  {
   2.197 -	vm_page_t m, ptdpg[NPGPTD];
   2.198 +	vm_page_t m, ptdpg[NPGPTD + 1];
   2.199  	vm_paddr_t ma;
   2.200  	int i;
   2.201  
   2.202 @@ -1436,25 +1475,27 @@ pmap_release(pmap_t pmap)
   2.203  	LIST_REMOVE(pmap, pm_list);
   2.204  	mtx_unlock_spin(&allpmaps_lock);
   2.205  
   2.206 -	for (i = 0; i < NPGPTD; i++)
   2.207 +	for (i = 0; i < NPGPTD; i++) {
   2.208  		ptdpg[i] = PHYS_TO_VM_PAGE(PT_GET(&pmap->pm_pdir[PTDPTDI + i]));
   2.209 -
   2.210 -	for (i = 0; i < nkpt + NPGPTD; i++)
   2.211 -		PD_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
   2.212 +	}
   2.213 +	ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir));
   2.214 +	for (i = 0; i < nkpt + NPGPTD; i++) 
   2.215 +		PD_CLEAR_VA(pmap, &pmap->pm_pdir[PTDPTDI + i], FALSE);
   2.216  
   2.217  	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
   2.218  	    sizeof(*pmap->pm_pdir));
   2.219  #ifdef SMP
   2.220 -	PD_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
   2.221 +	PD_CLEAR_VA(pmap, &pmap->pm_pdir[MPPTDI], FALSE);
   2.222  #endif
   2.223  	PT_UPDATES_FLUSH();
   2.224  	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
   2.225  
   2.226  	vm_page_lock_queues();
   2.227 -	for (i = 0; i < NPGPTD; i++) {
   2.228 +	for (i = 0; i < NPGPTD + 1; i++) {
   2.229  		m = ptdpg[i];
   2.230  		
   2.231  		ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
   2.232 +		/* unpinning L1 and L2 treated the same */
   2.233                  xen_pgd_unpin(ma);
   2.234  #ifdef PAE
   2.235  		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
   2.236 @@ -1530,12 +1571,12 @@ pmap_growkernel(vm_offset_t addr)
   2.237  		pmap_zero_page(nkpg);
   2.238  		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
   2.239  		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
   2.240 -		PD_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
   2.241 +		PD_SET_VA(kernel_pmap, &pdir_pde(kernel_pmap->pm_pdir, kernel_vm_end), newpdir, TRUE);
   2.242  
   2.243  		mtx_lock_spin(&allpmaps_lock);
   2.244  		LIST_FOREACH(pmap, &allpmaps, pm_list) {
   2.245  			pde = pmap_pde(pmap, kernel_vm_end);
   2.246 -			PD_SET_VA(pde, newpdir, FALSE);
   2.247 +			PD_SET_VA(pmap, pde, newpdir, FALSE);
   2.248  		}
   2.249  		PT_UPDATES_FLUSH();
   2.250  		mtx_unlock_spin(&allpmaps_lock);
   2.251 @@ -1752,7 +1793,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
   2.252  		 * Check for large page.
   2.253  		 */
   2.254  		if ((ptpaddr & PG_PS) != 0) {
   2.255 -			PD_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
   2.256 +			PD_CLEAR_VA(pmap, &pmap->pm_pdir[pdirindex], TRUE);
   2.257  			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
   2.258  			anyvalid = 1;
   2.259  			continue;
   2.260 @@ -2326,7 +2367,7 @@ retry:
   2.261  		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
   2.262  		npdes = size >> PDRSHIFT;
   2.263  		for(i = 0; i < npdes; i++) {
   2.264 -			PD_SET_VA(&pmap->pm_pdir[ptepindex],
   2.265 +			PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex],
   2.266  			    ptepa | PG_U | PG_RW | PG_V | PG_PS, FALSE);
   2.267  			ptepa += NBPDR;
   2.268  			ptepindex += 1;
   2.269 @@ -2458,7 +2499,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
   2.270  			
   2.271  		if (srcptepaddr & PG_PS) {
   2.272  			if (dst_pmap->pm_pdir[ptepindex] == 0) {
   2.273 -				PT_SET_VA(&dst_pmap->pm_pdir[ptepindex], srcptepaddr, TRUE);
   2.274 +				PD_SET_VA(dst_pmap, &dst_pmap->pm_pdir[ptepindex], srcptepaddr, TRUE);
   2.275  				dst_pmap->pm_stats.resident_count +=
   2.276  				    NBPDR / PAGE_SIZE;
   2.277  			}
   2.278 @@ -2629,6 +2670,32 @@ pmap_copy_page(vm_page_t src, vm_page_t 
   2.279  	mtx_unlock(&CMAPCADDR12_lock);
   2.280  }
   2.281  
   2.282 +void
   2.283 +pmap_copy_ma(vm_paddr_t src, vm_paddr_t dst)
   2.284 +{
   2.285 +
   2.286 +	mtx_lock(&CMAPCADDR12_lock);
   2.287 +	if (*CMAP1)
   2.288 +		panic("pmap_copy_ma: CMAP1 busy");
   2.289 +	if (*CMAP2)
   2.290 +		panic("pmap_copy_ma: CMAP2 busy");
   2.291 +	sched_pin();
   2.292 +#ifdef I386_CPU
   2.293 +	invltlb();
   2.294 +#else
   2.295 +	invlpg((u_int)CADDR1);
   2.296 +	invlpg((u_int)CADDR2);
   2.297 +#endif
   2.298 +	PT_SET_VA_MA(CMAP1, PG_V | src | PG_A, FALSE);
   2.299 +	PT_SET_VA_MA(CMAP2, PG_V | PG_RW | dst | PG_A | PG_M, TRUE);
   2.300 +
   2.301 +	bcopy(CADDR1, CADDR2, PAGE_SIZE);
   2.302 +	PT_CLEAR_VA(CMAP1, FALSE);
   2.303 +	PT_CLEAR_VA(CMAP2, TRUE);
   2.304 +	sched_unpin();
   2.305 +	mtx_unlock(&CMAPCADDR12_lock);
   2.306 +}
   2.307 +
   2.308  /*
   2.309   * Returns true if the pmap's pv is one of the first
   2.310   * 16 pvs linked to from this page.  This count may
   2.311 @@ -2807,7 +2874,11 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
   2.312  	boolean_t rv;
   2.313  
   2.314  	rv = FALSE;
   2.315 -
   2.316 +	/* XXX 
   2.317 +	 * in order for writable pagetables to help, 
   2.318 +	 * this has to work - check if we aren't doing 
   2.319 +	 * an invlpg on the page tables linear mappings
   2.320 +	 */  
   2.321  	return (rv);
   2.322  	PMAP_LOCK(pmap);
   2.323  	if (pmap_pde(pmap, addr)) {
   2.324 @@ -3396,3 +3467,9 @@ pmap_pvdump(pa)
   2.325  	printf(" ");
   2.326  }
   2.327  #endif
   2.328 +
   2.329 +/*
   2.330 + * Local variables:
   2.331 + * c-basic-offset: 8
   2.332 + * End:
   2.333 + */
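
In pmap.c each pmap now carries its own shadow. pmap_pinit() allocates NPGPTD*2 pages (page directory plus its L1 shadow), seeds the shadow from the kernel pmap's shadow with the new pmap_copy_ma() (which borrows CMAP1/CMAP2 to copy one machine page to another), pins the shadow as an L1 and the read-only-remapped directory as an L2, and cross-links the two through their PTDPTDI slots; pmap_release() unpins and frees the extra page, treating L1 and L2 unpinning alike. The PD_SET_VA, PD_SET_VA_MA and PD_CLEAR_VA call sites gain the owning pmap so pd_set() can locate the shadow via pm_pdir[PTDPTDI]. A condensed sketch of the per-pmap setup for the single-page, non-PAE case, names as in the hunk and illustrative only:

    static void
    pmap_pinit_shadow_sketch(struct pmap *pmap, vm_paddr_t ma, vm_paddr_t ma_shadow)
    {
        /* Seed the shadow from the kernel shadow, then pin it as an L1. */
        pmap_copy_ma(kernel_pmap->pm_pdir[PTDPTDI] & ~(PG_RW | PG_M), ma_shadow);
        xen_pt_pin(ma_shadow);

        /* The directory itself must be mapped read-only before pinning as an L2. */
        PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
        xen_pgd_pin(ma);

        /* Cross-link the two pages through their PTDPTDI slots (both read-only). */
        xen_queue_pt_update(vtomach(&pmap->pm_pdir[PTDPTDI]), ma_shadow | PG_V | PG_A);
        xen_queue_pt_update(ma_shadow + PTDPTDI * sizeof(vm_paddr_t),
            vtomach(pmap->pm_pdir) | PG_V | PG_A);
        xen_flush_queue();
    }
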
     3.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Sat Apr 23 11:10:11 2005 +0000
     3.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Sat Apr 23 11:18:20 2005 +0000
     3.3 @@ -474,7 +474,7 @@ xen_machphys_update(unsigned long mfn, u
     3.4  }
     3.5  
     3.6  void
     3.7 -xen_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
     3.8 +xen_queue_pt_update(vm_paddr_t ptr, vm_paddr_t val)
     3.9  {
    3.10      SET_VCPU();
    3.11      
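
The xen_machdep.c change is the matching signature update: xen_queue_pt_update() now takes two machine addresses (vm_paddr_t) rather than a pt_entry_t pointer and value. The first argument was already a machine address produced by vtomach() at every call site, and the per-pmap shadow pages are addressed only by machine address, so the pointer type had become misleading. Call sites change shape roughly as below (ptr, npte, shadow_pdir_ma, idx and val are placeholders, not identifiers from the tree):

    /* before: a machine address smuggled through a pt_entry_t pointer */
    xen_queue_pt_update((pt_entry_t *)vtomach(ptr), xpmap_ptom(npte));

    /* after: plain machine addresses, which also covers slots with no kernel
     * virtual mapping, such as the per-pmap shadow pages */
    xen_queue_pt_update(vtomach(ptr), xpmap_ptom(npte));
    xen_queue_pt_update(shadow_pdir_ma + idx * sizeof(vm_paddr_t), val);
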
     4.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h	Sat Apr 23 11:10:11 2005 +0000
     4.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h	Sat Apr 23 11:18:20 2005 +0000
     4.3 @@ -88,11 +88,13 @@ static inline int
     4.4  HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
     4.5  {
     4.6      int ret;
     4.7 +    unsigned long ign1, ign2;
     4.8 +
     4.9      __asm__ __volatile__ (
    4.10          TRAP_INSTR
    4.11 -        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
    4.12 -        "b" (frame_list), "c" (entries) : "memory" );
    4.13 -
    4.14 +        : "=a" (ret), "=b" (ign1), "=c" (ign2)
    4.15 +        : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
    4.16 +        : "memory" );
    4.17  
    4.18      return ret;
    4.19  }
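
The hypervisor.h fix addresses the inline assembly in HYPERVISOR_set_gdt(): the old wrapper named %ebx and %ecx only as inputs even though the hypercall trap can clobber them, so the compiler was free to assume they still held frame_list and entries afterwards. The rewritten asm ties those registers to dummy outputs (ign1, ign2), the usual way to mark clobbered input registers with GCC constraints. The corresponding machdep.c hunk now checks the return value and panics on failure instead of just printing a message. The new wrapper, restated here with comments:

    static inline int
    HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
    {
        int ret;
        unsigned long ign1, ign2;       /* dummy outputs for clobbered regs */

        __asm__ __volatile__ (
            TRAP_INSTR
            : "=a" (ret), "=b" (ign1), "=c" (ign2)      /* outputs */
            : "0" (__HYPERVISOR_set_gdt),               /* hypercall number in %eax */
              "1" (frame_list), "2" (entries)           /* arguments in %ebx, %ecx */
            : "memory" );

        return ret;
    }
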
     5.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h	Sat Apr 23 11:10:11 2005 +0000
     5.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h	Sat Apr 23 11:18:20 2005 +0000
     5.3 @@ -35,7 +35,7 @@
     5.4  #define _XEN_XENPMAP_H_
     5.5  #include <machine/xenvar.h>
     5.6  void xen_invlpg(vm_offset_t);
     5.7 -void xen_queue_pt_update(pt_entry_t *, pt_entry_t);
     5.8 +void xen_queue_pt_update(vm_paddr_t, vm_paddr_t);
     5.9  void xen_pt_switch(uint32_t);
    5.10  void xen_set_ldt(unsigned long, unsigned long);
    5.11  void xen_tlb_flush(void);
    5.12 @@ -59,8 +59,8 @@ void pmap_ref(pt_entry_t *pte, unsigned 
    5.13  #define PMAP_DEC_REF_PAGE(a)
    5.14  #endif
    5.15  
    5.16 +#define WRITABLE_PAGETABLES
    5.17  #define ALWAYS_SYNC 0
    5.18 -#define PT_DEBUG
    5.19  
    5.20  #ifdef PT_DEBUG
    5.21  #define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__) 
    5.22 @@ -70,6 +70,13 @@ void pmap_ref(pt_entry_t *pte, unsigned 
    5.23  
    5.24  #define pmap_valid_entry(E)           ((E) & PG_V) /* is PDE or PTE valid? */
    5.25  
    5.26 +#define SH_PD_SET_VA        1
    5.27 +#define SH_PD_SET_VA_MA     2
    5.28 +#define SH_PD_SET_VA_CLEAR  3
    5.29 +
    5.30 +struct pmap;
    5.31 +void pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type);
    5.32 +
    5.33  #define	PT_GET(_ptp)						\
    5.34  	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
    5.35  
    5.36 @@ -90,20 +97,19 @@ void pmap_ref(pt_entry_t *pte, unsigned 
    5.37          *(_ptp) = 0;                                            \
    5.38  } while (/*CONSTCOND*/0)
    5.39  
    5.40 -#define PD_SET_VA(_ptp,_npte,sync) do {				\
    5.41 +#define PD_SET_VA(_pmap, _ptp, _npte, sync) do {			\
    5.42          PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
    5.43 -	xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), 	\
    5.44 -			    xpmap_ptom((_npte))); 		\
    5.45 +        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA);           \
    5.46  	if (sync || ALWAYS_SYNC) xen_flush_queue();     	\
    5.47  } while (/*CONSTCOND*/0)
    5.48 -#define PD_SET_VA_MA(_ptp,_npte,sync) do {		        \
    5.49 +#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do {		\
    5.50          PMAP_REF((_ptp), (_npte));                              \
    5.51 -	xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
    5.52 +        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA);        \
    5.53  	if (sync || ALWAYS_SYNC) xen_flush_queue();		\
    5.54  } while (/*CONSTCOND*/0)
    5.55 -#define PD_CLEAR_VA(_ptp, sync) do {				\
    5.56 +#define PD_CLEAR_VA(_pmap, _ptp, sync) do {			\
    5.57          PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
    5.58 -	xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0);	\
    5.59 +        pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR);     \
    5.60  	if (sync || ALWAYS_SYNC) xen_flush_queue();		\
    5.61  } while (/*CONSTCOND*/0)
    5.62  
    5.63 @@ -128,9 +134,21 @@ void pmap_ref(pt_entry_t *pte, unsigned 
    5.64  		xen_flush_queue();				\
    5.65  } while (/*CONSTCOND*/0)
    5.66  
    5.67 -#define PD_SET_VA    PT_SET_VA
    5.68 -#define PD_SET_VA_MA PT_SET_VA_MA
    5.69 -#define PD_CLEAR_VA  PT_CLEAR_VA
    5.70 +#define PD_SET_VA(_pmap, _ptp,_npte,sync) do {			\
    5.71 +        PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
    5.72 +        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA);           \
    5.73 +	if (sync || ALWAYS_SYNC) xen_flush_queue();     	\
    5.74 +} while (/*CONSTCOND*/0)
    5.75 +#define PD_SET_VA_MA(_pmap, _ptp,_npte,sync) do {		\
    5.76 +        PMAP_REF((_ptp), (_npte));                              \
    5.77 +        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA);        \
    5.78 +	if (sync || ALWAYS_SYNC) xen_flush_queue();		\
    5.79 +} while (/*CONSTCOND*/0)
    5.80 +#define PD_CLEAR_VA(_pmap, _ptp, sync) do {			\
    5.81 +        PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
    5.82 +        pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR);          \
    5.83 +	if (sync || ALWAYS_SYNC) xen_flush_queue();		\
    5.84 +} while (/*CONSTCOND*/0)
    5.85  
    5.86  #endif
    5.87
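
Finally, xenpmap.h: the xen_queue_pt_update() prototype follows the new signature, WRITABLE_PAGETABLES is defined here unconditionally (and the stray PT_DEBUG define is dropped), and the PD_SET_VA / PD_SET_VA_MA / PD_CLEAR_VA macros in what appear to be both configuration branches now take the owning pmap and route through pd_set(), replacing the direct xen_queue_pt_update() calls and the old PT_* aliases. At pmap.c call sites the visible effect is just the extra first argument (identifiers as in the hunks above):

    /* before */
    PD_SET_VA(&pmap->pm_pdir[ptepindex],
        ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M, TRUE);

    /* after */
    PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex],
        ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M, TRUE);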