ia64/xen-unstable

changeset 6243:1a94949348ff

Fix the range_straddles_page_boundary() check to exclude regions that
were allocated with xen_create_contiguous_region(). Such regions are
machine-contiguous and therefore do not require swiotlb bounce buffers.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 18 10:41:55 2005 +0000 (2005-08-18)
parents aa8abf007260
children cb5648ba5821
files linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6-xen-sparse/arch/xen/i386/mm/init.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
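
For context: the revised check (last hunk below, in dma-mapping.h) reports a
straddle only when a buffer crosses a page boundary AND its first page is not
marked in the new contiguous_bitmap. A minimal standalone sketch of that logic
follows; PAGE_SHIFT, test_bit() and __pa() are kernel primitives, modelled
here with plain-C stand-ins purely for illustration.

	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	/* One bit per pseudo-physical page frame. */
	static unsigned long contiguous_bitmap[1024];

	/* Stand-in for the kernel's test_bit(). */
	static int test_bit(unsigned long nr, const unsigned long *addr)
	{
		return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	/* Stand-in for __pa(): pretend virtual == pseudo-physical here. */
	static unsigned long __pa(void *p)
	{
		return (unsigned long)p;
	}

	static int range_straddles_page_boundary(void *p, size_t size)
	{
		/* Needs bouncing only if it crosses a page boundary and is
		 * not part of a machine-contiguous extent. */
		return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
			!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
	}

	int main(void)
	{
		void *p = (void *)(3 * PAGE_SIZE + PAGE_SIZE - 16);

		printf("straddles: %d\n", range_straddles_page_boundary(p, 64)); /* 1 */
		contiguous_bitmap[0] |= 1UL << 3;	/* mark page 3 contiguous */
		printf("straddles: %d\n", range_straddles_page_boundary(p, 64)); /* 0 */
		return 0;
	}
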
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Thu Aug 18 10:22:08 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Thu Aug 18 10:41:55 2005 +0000
     1.3 @@ -59,124 +59,124 @@
     1.4  #ifndef CONFIG_XEN_SHADOW_MODE
     1.5  void xen_l1_entry_update(pte_t *ptr, pte_t val)
     1.6  {
     1.7 -    mmu_update_t u;
     1.8 -    u.ptr = virt_to_machine(ptr);
     1.9 -    u.val = pte_val_ma(val);
    1.10 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.11 +	mmu_update_t u;
    1.12 +	u.ptr = virt_to_machine(ptr);
    1.13 +	u.val = pte_val_ma(val);
    1.14 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.15  }
    1.16  
    1.17  void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
    1.18  {
    1.19 -    mmu_update_t u;
    1.20 -    u.ptr = virt_to_machine(ptr);
    1.21 -    u.val = pmd_val_ma(val);
    1.22 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.23 +	mmu_update_t u;
    1.24 +	u.ptr = virt_to_machine(ptr);
    1.25 +	u.val = pmd_val_ma(val);
    1.26 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.27  }
    1.28  
    1.29  #ifdef CONFIG_X86_PAE
    1.30  void xen_l3_entry_update(pud_t *ptr, pud_t val)
    1.31  {
    1.32 -    mmu_update_t u;
    1.33 -    u.ptr = virt_to_machine(ptr);
    1.34 -    u.val = pud_val_ma(val);
    1.35 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.36 +	mmu_update_t u;
    1.37 +	u.ptr = virt_to_machine(ptr);
    1.38 +	u.val = pud_val_ma(val);
    1.39 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.40  }
    1.41  #endif
    1.42  
    1.43  #ifdef CONFIG_X86_64
    1.44  void xen_l3_entry_update(pud_t *ptr, pud_t val)
    1.45  {
    1.46 -    mmu_update_t u;
    1.47 -    u.ptr = virt_to_machine(ptr);
    1.48 -    u.val = val.pud;
    1.49 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.50 +	mmu_update_t u;
    1.51 +	u.ptr = virt_to_machine(ptr);
    1.52 +	u.val = val.pud;
    1.53 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.54  }
    1.55  
    1.56  void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
    1.57  {
    1.58 -    mmu_update_t u;
    1.59 -    u.ptr = virt_to_machine(ptr);
    1.60 -    u.val = val.pgd;
    1.61 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.62 +	mmu_update_t u;
    1.63 +	u.ptr = virt_to_machine(ptr);
    1.64 +	u.val = val.pgd;
    1.65 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.66  }
    1.67  #endif /* CONFIG_X86_64 */
    1.68  #endif /* CONFIG_XEN_SHADOW_MODE */
    1.69  
    1.70  void xen_machphys_update(unsigned long mfn, unsigned long pfn)
    1.71  {
    1.72 -    mmu_update_t u;
    1.73 -    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    1.74 -    u.val = pfn;
    1.75 -    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.76 +	mmu_update_t u;
    1.77 +	u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    1.78 +	u.val = pfn;
    1.79 +	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
    1.80  }
    1.81  
    1.82  void xen_pt_switch(unsigned long ptr)
    1.83  {
    1.84 -    struct mmuext_op op;
    1.85 -    op.cmd = MMUEXT_NEW_BASEPTR;
    1.86 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    1.87 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    1.88 +	struct mmuext_op op;
    1.89 +	op.cmd = MMUEXT_NEW_BASEPTR;
    1.90 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    1.91 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    1.92  }
    1.93  
    1.94  void xen_new_user_pt(unsigned long ptr)
    1.95  {
    1.96 -    struct mmuext_op op;
    1.97 -    op.cmd = MMUEXT_NEW_USER_BASEPTR;
    1.98 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    1.99 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.100 +	struct mmuext_op op;
   1.101 +	op.cmd = MMUEXT_NEW_USER_BASEPTR;
   1.102 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.103 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.104  }
   1.105  
   1.106  void xen_tlb_flush(void)
   1.107  {
   1.108 -    struct mmuext_op op;
   1.109 -    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
   1.110 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.111 +	struct mmuext_op op;
   1.112 +	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
   1.113 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.114  }
   1.115  
   1.116  void xen_invlpg(unsigned long ptr)
   1.117  {
   1.118 -    struct mmuext_op op;
   1.119 -    op.cmd = MMUEXT_INVLPG_LOCAL;
   1.120 -    op.linear_addr = ptr & PAGE_MASK;
   1.121 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.122 +	struct mmuext_op op;
   1.123 +	op.cmd = MMUEXT_INVLPG_LOCAL;
   1.124 +	op.linear_addr = ptr & PAGE_MASK;
   1.125 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.126  }
   1.127  
   1.128  #ifdef CONFIG_SMP
   1.129  
   1.130  void xen_tlb_flush_all(void)
   1.131  {
   1.132 -    struct mmuext_op op;
   1.133 -    op.cmd = MMUEXT_TLB_FLUSH_ALL;
   1.134 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.135 +	struct mmuext_op op;
   1.136 +	op.cmd = MMUEXT_TLB_FLUSH_ALL;
   1.137 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.138  }
   1.139  
   1.140  void xen_tlb_flush_mask(cpumask_t *mask)
   1.141  {
   1.142 -    struct mmuext_op op;
   1.143 -    if ( cpus_empty(*mask) )
   1.144 -        return;
   1.145 -    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
   1.146 -    op.vcpumask = mask->bits;
   1.147 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.148 +	struct mmuext_op op;
   1.149 +	if ( cpus_empty(*mask) )
   1.150 +		return;
   1.151 +	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
   1.152 +	op.vcpumask = mask->bits;
   1.153 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.154  }
   1.155  
   1.156  void xen_invlpg_all(unsigned long ptr)
   1.157  {
   1.158 -    struct mmuext_op op;
   1.159 -    op.cmd = MMUEXT_INVLPG_ALL;
   1.160 -    op.linear_addr = ptr & PAGE_MASK;
   1.161 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.162 +	struct mmuext_op op;
   1.163 +	op.cmd = MMUEXT_INVLPG_ALL;
   1.164 +	op.linear_addr = ptr & PAGE_MASK;
   1.165 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.166  }
   1.167  
   1.168  void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
   1.169  {
   1.170 -    struct mmuext_op op;
   1.171 -    if ( cpus_empty(*mask) )
   1.172 -        return;
   1.173 -    op.cmd = MMUEXT_INVLPG_MULTI;
   1.174 -    op.vcpumask = mask->bits;
   1.175 -    op.linear_addr = ptr & PAGE_MASK;
   1.176 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.177 +	struct mmuext_op op;
   1.178 +	if ( cpus_empty(*mask) )
   1.179 +		return;
   1.180 +	op.cmd = MMUEXT_INVLPG_MULTI;
   1.181 +	op.vcpumask = mask->bits;
   1.182 +	op.linear_addr = ptr & PAGE_MASK;
   1.183 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.184  }
   1.185  
   1.186  #endif /* CONFIG_SMP */
   1.187 @@ -184,221 +184,281 @@ void xen_invlpg_mask(cpumask_t *mask, un
   1.188  #ifndef CONFIG_XEN_SHADOW_MODE
   1.189  void xen_pgd_pin(unsigned long ptr)
   1.190  {
   1.191 -    struct mmuext_op op;
   1.192 +	struct mmuext_op op;
   1.193  #ifdef CONFIG_X86_64
   1.194 -    op.cmd = MMUEXT_PIN_L4_TABLE;
   1.195 +	op.cmd = MMUEXT_PIN_L4_TABLE;
   1.196  #elif defined(CONFIG_X86_PAE)
   1.197 -    op.cmd = MMUEXT_PIN_L3_TABLE;
   1.198 +	op.cmd = MMUEXT_PIN_L3_TABLE;
   1.199  #else
   1.200 -    op.cmd = MMUEXT_PIN_L2_TABLE;
   1.201 +	op.cmd = MMUEXT_PIN_L2_TABLE;
   1.202  #endif
   1.203 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.204 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.205 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.206 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.207  }
   1.208  
   1.209  void xen_pgd_unpin(unsigned long ptr)
   1.210  {
   1.211 -    struct mmuext_op op;
   1.212 -    op.cmd = MMUEXT_UNPIN_TABLE;
   1.213 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.214 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.215 +	struct mmuext_op op;
   1.216 +	op.cmd = MMUEXT_UNPIN_TABLE;
   1.217 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.218 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.219  }
   1.220  
   1.221  void xen_pte_pin(unsigned long ptr)
   1.222  {
   1.223 -    struct mmuext_op op;
   1.224 -    op.cmd = MMUEXT_PIN_L1_TABLE;
   1.225 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.226 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.227 +	struct mmuext_op op;
   1.228 +	op.cmd = MMUEXT_PIN_L1_TABLE;
   1.229 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.230 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.231  }
   1.232  
   1.233  void xen_pte_unpin(unsigned long ptr)
   1.234  {
   1.235 -    struct mmuext_op op;
   1.236 -    op.cmd = MMUEXT_UNPIN_TABLE;
   1.237 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.238 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.239 +	struct mmuext_op op;
   1.240 +	op.cmd = MMUEXT_UNPIN_TABLE;
   1.241 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.242 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.243  }
   1.244  
   1.245  #ifdef CONFIG_X86_64
   1.246  void xen_pud_pin(unsigned long ptr)
   1.247  {
   1.248 -    struct mmuext_op op;
   1.249 -    op.cmd = MMUEXT_PIN_L3_TABLE;
   1.250 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.251 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.252 +	struct mmuext_op op;
   1.253 +	op.cmd = MMUEXT_PIN_L3_TABLE;
   1.254 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.255 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.256  }
   1.257  
   1.258  void xen_pud_unpin(unsigned long ptr)
   1.259  {
   1.260 -    struct mmuext_op op;
   1.261 -    op.cmd = MMUEXT_UNPIN_TABLE;
   1.262 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.263 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.264 +	struct mmuext_op op;
   1.265 +	op.cmd = MMUEXT_UNPIN_TABLE;
   1.266 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.267 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.268  }
   1.269  
   1.270  void xen_pmd_pin(unsigned long ptr)
   1.271  {
   1.272 -    struct mmuext_op op;
   1.273 -    op.cmd = MMUEXT_PIN_L2_TABLE;
   1.274 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.275 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.276 +	struct mmuext_op op;
   1.277 +	op.cmd = MMUEXT_PIN_L2_TABLE;
   1.278 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.279 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.280  }
   1.281  
   1.282  void xen_pmd_unpin(unsigned long ptr)
   1.283  {
   1.284 -    struct mmuext_op op;
   1.285 -    op.cmd = MMUEXT_UNPIN_TABLE;
   1.286 -    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.287 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.288 +	struct mmuext_op op;
   1.289 +	op.cmd = MMUEXT_UNPIN_TABLE;
   1.290 +	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   1.291 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.292  }
   1.293  #endif /* CONFIG_X86_64 */
   1.294  #endif /* CONFIG_XEN_SHADOW_MODE */
   1.295  
   1.296  void xen_set_ldt(unsigned long ptr, unsigned long len)
   1.297  {
   1.298 -    struct mmuext_op op;
   1.299 -    op.cmd = MMUEXT_SET_LDT;
   1.300 -    op.linear_addr = ptr;
   1.301 -    op.nr_ents = len;
   1.302 -    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.303 +	struct mmuext_op op;
   1.304 +	op.cmd = MMUEXT_SET_LDT;
   1.305 +	op.linear_addr = ptr;
   1.306 +	op.nr_ents = len;
   1.307 +	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   1.308 +}
   1.309 +
   1.310 +/*
   1.311 + * Bitmap is indexed by page number. If bit is set, the page is part of a
   1.312 + * xen_create_contiguous_region() area of memory.
   1.313 + */
   1.314 +unsigned long *contiguous_bitmap;
   1.315 +
   1.316 +static void contiguous_bitmap_set(
   1.317 +	unsigned long first_page, unsigned long nr_pages)
   1.318 +{
   1.319 +	unsigned long start_off, end_off, curr_idx, end_idx;
   1.320 +
   1.321 +	curr_idx  = first_page / BITS_PER_LONG;
   1.322 +	start_off = first_page & (BITS_PER_LONG-1);
   1.323 +	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
   1.324 +	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
   1.325 +
   1.326 +	if (curr_idx == end_idx) {
   1.327 +		contiguous_bitmap[curr_idx] |=
   1.328 +			((1UL<<end_off)-1) & -(1UL<<start_off);
   1.329 +	} else {
   1.330 +		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
   1.331 +		while ( ++curr_idx < end_idx )
   1.332 +			contiguous_bitmap[curr_idx] = ~0UL;
   1.333 +		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
   1.334 +	}
   1.335 +}
   1.336 +
   1.337 +static void contiguous_bitmap_clear(
   1.338 +	unsigned long first_page, unsigned long nr_pages)
   1.339 +{
   1.340 +	unsigned long start_off, end_off, curr_idx, end_idx;
   1.341 +
   1.342 +	curr_idx  = first_page / BITS_PER_LONG;
   1.343 +	start_off = first_page & (BITS_PER_LONG-1);
   1.344 +	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
   1.345 +	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
   1.346 +
   1.347 +	if (curr_idx == end_idx) {
   1.348 +		contiguous_bitmap[curr_idx] &=
   1.349 +			-(1UL<<end_off) | ((1UL<<start_off)-1);
   1.350 +	} else {
   1.351 +		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
   1.352 +		while ( ++curr_idx != end_idx )
   1.353 +			contiguous_bitmap[curr_idx] = 0;
   1.354 +		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
   1.355 +	}
   1.356  }
   1.357  
   1.358  /* Ensure multi-page extents are contiguous in machine memory. */
   1.359  void xen_create_contiguous_region(unsigned long vstart, unsigned int order)
   1.360  {
   1.361 -    pgd_t         *pgd; 
   1.362 -    pud_t         *pud; 
   1.363 -    pmd_t         *pmd;
   1.364 -    pte_t         *pte;
   1.365 -    unsigned long  mfn, i, flags;
   1.366 +	pgd_t         *pgd; 
   1.367 +	pud_t         *pud; 
   1.368 +	pmd_t         *pmd;
   1.369 +	pte_t         *pte;
   1.370 +	unsigned long  mfn, i, flags;
   1.371  
   1.372 -    scrub_pages(vstart, 1 << order);
   1.373 +	scrub_pages(vstart, 1 << order);
   1.374  
   1.375 -    balloon_lock(flags);
   1.376 +	balloon_lock(flags);
   1.377  
   1.378 -    /* 1. Zap current PTEs, giving away the underlying pages. */
   1.379 -    for (i = 0; i < (1<<order); i++) {
   1.380 -        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.381 -        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.382 -        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.383 -        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.384 -        mfn = pte_mfn(*pte);
   1.385 -        BUG_ON(HYPERVISOR_update_va_mapping(
   1.386 -            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.387 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.388 -            INVALID_P2M_ENTRY;
   1.389 -        BUG_ON(HYPERVISOR_dom_mem_op(
   1.390 -            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
   1.391 -    }
   1.392 +	/* 1. Zap current PTEs, giving away the underlying pages. */
   1.393 +	for (i = 0; i < (1<<order); i++) {
   1.394 +		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.395 +		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.396 +		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.397 +		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.398 +		mfn = pte_mfn(*pte);
   1.399 +		BUG_ON(HYPERVISOR_update_va_mapping(
   1.400 +			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.401 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.402 +			INVALID_P2M_ENTRY;
   1.403 +		BUG_ON(HYPERVISOR_dom_mem_op(
   1.404 +			MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
   1.405 +	}
   1.406  
   1.407 -    /* 2. Get a new contiguous memory extent. */
   1.408 -    BUG_ON(HYPERVISOR_dom_mem_op(
   1.409 -	       MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
   1.410 +	/* 2. Get a new contiguous memory extent. */
   1.411 +	BUG_ON(HYPERVISOR_dom_mem_op(
   1.412 +		MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
   1.413  
   1.414 -    /* 3. Map the new extent in place of old pages. */
   1.415 -    for (i = 0; i < (1<<order); i++) {
   1.416 -        BUG_ON(HYPERVISOR_update_va_mapping(
   1.417 -            vstart + (i*PAGE_SIZE),
   1.418 -            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
   1.419 -        xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
   1.420 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
   1.421 -    }
   1.422 +	/* 3. Map the new extent in place of old pages. */
   1.423 +	for (i = 0; i < (1<<order); i++) {
   1.424 +		BUG_ON(HYPERVISOR_update_va_mapping(
   1.425 +			vstart + (i*PAGE_SIZE),
   1.426 +			__pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
   1.427 +		xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
   1.428 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
   1.429 +	}
   1.430  
   1.431 -    flush_tlb_all();
   1.432 +	flush_tlb_all();
   1.433  
   1.434 -    balloon_unlock(flags);
   1.435 +	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.436 +
   1.437 +	balloon_unlock(flags);
   1.438  }
   1.439  
   1.440  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
   1.441  {
   1.442 -    pgd_t         *pgd; 
   1.443 -    pud_t         *pud; 
   1.444 -    pmd_t         *pmd;
   1.445 -    pte_t         *pte;
   1.446 -    unsigned long  mfn, i, flags;
   1.447 +	pgd_t         *pgd; 
   1.448 +	pud_t         *pud; 
   1.449 +	pmd_t         *pmd;
   1.450 +	pte_t         *pte;
   1.451 +	unsigned long  mfn, i, flags;
   1.452  
   1.453 -    scrub_pages(vstart, 1 << order);
   1.454 +	scrub_pages(vstart, 1 << order);
   1.455  
   1.456 -    balloon_lock(flags);
   1.457 +	balloon_lock(flags);
   1.458 +
   1.459 +	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.460  
   1.461 -    /* 1. Zap current PTEs, giving away the underlying pages. */
   1.462 -    for (i = 0; i < (1<<order); i++) {
   1.463 -        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.464 -        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.465 -        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.466 -        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.467 -        mfn = pte_mfn(*pte);
   1.468 -        BUG_ON(HYPERVISOR_update_va_mapping(
   1.469 -            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.470 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.471 -            INVALID_P2M_ENTRY;
   1.472 -        BUG_ON(HYPERVISOR_dom_mem_op(
   1.473 -            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
   1.474 -    }
   1.475 +	/* 1. Zap current PTEs, giving away the underlying pages. */
   1.476 +	for (i = 0; i < (1<<order); i++) {
   1.477 +		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.478 +		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.479 +		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.480 +		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.481 +		mfn = pte_mfn(*pte);
   1.482 +		BUG_ON(HYPERVISOR_update_va_mapping(
   1.483 +			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.484 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.485 +			INVALID_P2M_ENTRY;
   1.486 +		BUG_ON(HYPERVISOR_dom_mem_op(
   1.487 +			MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
   1.488 +	}
   1.489  
   1.490 -    /* 2. Map new pages in place of old pages. */
   1.491 -    for (i = 0; i < (1<<order); i++) {
   1.492 -        BUG_ON(HYPERVISOR_dom_mem_op(
   1.493 -            MEMOP_increase_reservation, &mfn, 1, 0) != 1);
   1.494 -        BUG_ON(HYPERVISOR_update_va_mapping(
   1.495 -            vstart + (i*PAGE_SIZE),
   1.496 -            __pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
   1.497 -        xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
   1.498 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
   1.499 -    }
   1.500 +	/* 2. Map new pages in place of old pages. */
   1.501 +	for (i = 0; i < (1<<order); i++) {
   1.502 +		BUG_ON(HYPERVISOR_dom_mem_op(
   1.503 +			MEMOP_increase_reservation, &mfn, 1, 0) != 1);
   1.504 +		BUG_ON(HYPERVISOR_update_va_mapping(
   1.505 +			vstart + (i*PAGE_SIZE),
   1.506 +			__pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
   1.507 +		xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
   1.508 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
   1.509 +	}
   1.510  
   1.511 -    flush_tlb_all();
   1.512 +	flush_tlb_all();
   1.513  
   1.514 -    balloon_unlock(flags);
   1.515 +	balloon_unlock(flags);
   1.516  }
   1.517  
   1.518  
   1.519  unsigned long allocate_empty_lowmem_region(unsigned long pages)
   1.520  {
   1.521 -    pgd_t         *pgd;
   1.522 -    pud_t         *pud; 
   1.523 -    pmd_t         *pmd;
   1.524 -    pte_t         *pte;
   1.525 -    unsigned long *pfn_array;
   1.526 -    unsigned long  vstart;
   1.527 -    unsigned long  i;
   1.528 -    unsigned int   order = get_order(pages*PAGE_SIZE);
   1.529 +	pgd_t         *pgd;
   1.530 +	pud_t         *pud; 
   1.531 +	pmd_t         *pmd;
   1.532 +	pte_t         *pte;
   1.533 +	unsigned long *pfn_array;
   1.534 +	unsigned long  vstart;
   1.535 +	unsigned long  i;
   1.536 +	unsigned int   order = get_order(pages*PAGE_SIZE);
   1.537  
   1.538 -    vstart = __get_free_pages(GFP_KERNEL, order);
   1.539 -    if ( vstart == 0 )
   1.540 -        return 0UL;
   1.541 +	vstart = __get_free_pages(GFP_KERNEL, order);
   1.542 +	if (vstart == 0)
   1.543 +		return 0UL;
   1.544  
   1.545 -    scrub_pages(vstart, 1 << order);
   1.546 +	scrub_pages(vstart, 1 << order);
   1.547  
   1.548 -    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
   1.549 -    if ( pfn_array == NULL )
   1.550 -        BUG();
   1.551 +	pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
   1.552 +	BUG_ON(pfn_array == NULL);
   1.553  
   1.554 -    for ( i = 0; i < (1<<order); i++ )
   1.555 -    {
   1.556 -        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
   1.557 -        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.558 -        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.559 -        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
   1.560 -        pfn_array[i] = pte_mfn(*pte);
   1.561 +	for (i = 0; i < (1<<order); i++) {
   1.562 +		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
   1.563 +		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.564 +		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.565 +		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
   1.566 +		pfn_array[i] = pte_mfn(*pte);
   1.567  #ifdef CONFIG_X86_64
   1.568 -        xen_l1_entry_update(pte, __pte(0));
   1.569 +		xen_l1_entry_update(pte, __pte(0));
   1.570  #else
   1.571 -        BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
   1.572 -					    __pte_ma(0), 0));
   1.573 +		BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
   1.574 +						    __pte_ma(0), 0));
   1.575  #endif
   1.576 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.577 -            INVALID_P2M_ENTRY;
   1.578 -    }
   1.579 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
   1.580 +			INVALID_P2M_ENTRY;
   1.581 +	}
   1.582  
   1.583 -    flush_tlb_all();
   1.584 +	flush_tlb_all();
   1.585  
   1.586 -    balloon_put_pages(pfn_array, 1 << order);
   1.587 +	balloon_put_pages(pfn_array, 1 << order);
   1.588  
   1.589 -    vfree(pfn_array);
   1.590 +	vfree(pfn_array);
   1.591  
   1.592 -    return vstart;
   1.593 +	return vstart;
   1.594  }
   1.595  
   1.596  EXPORT_SYMBOL(allocate_empty_lowmem_region);
   1.597 +
   1.598 +/*
   1.599 + * Local variables:
   1.600 + *  c-file-style: "linux"
   1.601 + *  indent-tabs-mode: t
   1.602 + *  c-indent-level: 8
   1.603 + *  c-basic-offset: 8
   1.604 + *  tab-width: 8
   1.605 + * End:
   1.606 + */
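
An editorial note on contiguous_bitmap_set()/contiguous_bitmap_clear() above:
the single-word case builds its mask with two's-complement arithmetic, where
-(1UL<<start_off) has every bit at position >= start_off set and
(1UL<<end_off)-1 has every bit below end_off set, so their AND covers exactly
[start_off, end_off). A tiny standalone check of that identity (the concrete
page numbers are arbitrary):

	#include <assert.h>
	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	int main(void)
	{
		/* Mark pages 3..6 (first_page = 3, nr_pages = 4): one word. */
		unsigned long first_page = 3, nr_pages = 4;
		unsigned long start_off = first_page & (BITS_PER_LONG - 1);
		unsigned long end_off   = (first_page + nr_pages) & (BITS_PER_LONG - 1);

		/* ((1UL<<end_off)-1) keeps bits below end_off;
		 * -(1UL<<start_off) keeps bits at or above start_off;
		 * the AND is exactly [start_off, end_off). */
		unsigned long mask = ((1UL << end_off) - 1) & -(1UL << start_off);

		assert(mask == 0x78UL);		/* bits 3,4,5,6 set */
		printf("mask = %#lx\n", mask);
		return 0;
	}
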
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Thu Aug 18 10:22:08 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Thu Aug 18 10:41:55 2005 +0000
     2.3 @@ -41,6 +41,8 @@
     2.4  #include <asm/sections.h>
     2.5  #include <asm-xen/hypervisor.h>
     2.6  
     2.7 +extern unsigned long *contiguous_bitmap;
     2.8 +
     2.9  #if defined(CONFIG_SWIOTLB)
    2.10  extern void swiotlb_init(void);
    2.11  int swiotlb;
    2.12 @@ -637,6 +639,11 @@ void __init mem_init(void)
    2.13  	int bad_ppro;
    2.14  	unsigned long pfn;
    2.15  
    2.16 +	contiguous_bitmap = alloc_bootmem_low_pages(
    2.17 +		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
    2.18 +	BUG_ON(!contiguous_bitmap);
    2.19 +	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
    2.20 +
    2.21  #if defined(CONFIG_SWIOTLB)
    2.22  	swiotlb_init();	
    2.23  #endif
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Thu Aug 18 10:22:08 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Thu Aug 18 10:41:55 2005 +0000
     3.3 @@ -40,6 +40,12 @@
     3.4  #include <asm/proto.h>
     3.5  #include <asm/smp.h>
     3.6  
     3.7 +extern unsigned long *contiguous_bitmap;
     3.8 +
     3.9 +#if defined(CONFIG_SWIOTLB)
    3.10 +extern void swiotlb_init(void);
    3.11 +#endif
    3.12 +
    3.13  #ifndef Dprintk
    3.14  #define Dprintk(x...)
    3.15  #endif
    3.16 @@ -794,8 +800,12 @@ void __init mem_init(void)
    3.17  	int codesize, reservedpages, datasize, initsize;
    3.18  	int tmp;
    3.19  
    3.20 +	contiguous_bitmap = alloc_bootmem_low_pages(
    3.21 +		(end_pfn + 2*BITS_PER_LONG) >> 3);
    3.22 +	BUG_ON(!contiguous_bitmap);
    3.23 +	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
    3.24 +
    3.25  #if defined(CONFIG_SWIOTLB)
    3.26 -	extern void swiotlb_init(void);
    3.27  	swiotlb_init();	
    3.28  #endif
    3.29  
     4.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Thu Aug 18 10:22:08 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Thu Aug 18 10:41:55 2005 +0000
     4.3 @@ -26,7 +26,9 @@ address_needs_mapping(struct device *hwd
     4.4  static inline int
     4.5  range_straddles_page_boundary(void *p, size_t size)
     4.6  {
     4.7 -	return ((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE);
     4.8 +	extern unsigned long *contiguous_bitmap;
     4.9 +	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
    4.10 +		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
    4.11  }
    4.12  
    4.13  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
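
Finally, a note on the allocation size in the two mem_init() hunks:
contiguous_bitmap needs one bit per page frame, so the byte count is the frame
count shifted right by 3 (bits to bytes), and the extra 2*BITS_PER_LONG frames
of headroom appear intended to keep the word-granular set/clear helpers inside
the allocation at the very top of memory. A quick illustrative calculation
(the max_low_pfn value is hypothetical):

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	int main(void)
	{
		/* Hypothetical i386 lowmem limit: 896 MB of 4 KB pages. */
		unsigned long max_low_pfn = 0x38000;
		unsigned long bytes = (max_low_pfn + 2 * BITS_PER_LONG) >> 3;

		/* One bit per frame; >>3 converts bits to bytes: ~28 KB. */
		printf("contiguous_bitmap: %lu bytes\n", bytes);
		return 0;
	}
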