release_compat_l4(d->vcpu[vcpuid]);
}
- d->arch.physaddr_bitsize = 64;
-
return 0;
}
#ifdef CONFIG_COMPAT
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
- d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+ d->arch.physaddr_bitsize =
fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ (PAGE_SIZE - 2);
if ( value > (!is_pv_32on64_domain(d) ?
"can be accessed by Xen in 32-bit mode.");
#endif
-#ifdef __x86_64__
- clip_to_limit((uint64_t)(MACH2PHYS_COMPAT_VIRT_END -
- __HYPERVISOR_COMPAT_VIRT_START) << 10,
- "Only the first %u GB of the physical memory map "
- "can be accessed by 32-on-64 guests.");
-#endif
-
reserve_dmi_region();
}
/*
 * Clamp a requested allocation address width to what domain @d can address.
 *
 * @d:    target domain, or NULL (no domain context).
 * @bits: caller-requested address width in bits.
 *
 * Returns @bits unchanged when there is no domain, or (after this patch)
 * when the domain is not a 32-on-64 PV guest — only compat guests have a
 * meaningful d->arch.physaddr_bitsize limit; otherwise returns the smaller
 * of the request and the domain's physical-address width.
 *
 * NOTE(review): this block is a diff hunk, not plain source; the '-'/'+'
 * lines below are the patch's removal/addition markers, kept verbatim.
 * Assumes physaddr_bitsize is initialised for all compat domains before
 * any allocation reaches here — TODO confirm against the setup hunk.
 */
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
{
- if ( d == NULL )
+ if ( (d == NULL) || !is_pv_32on64_domain(d) )
return bits;
return min(d->arch.physaddr_bitsize, bits);
}
if ( xsm_grant_transfer(d, e) )
{
+ unlock_and_copyback:
rcu_unlock_domain(e);
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
gop.status = GNTST_permission_denied;
goto copyback;
}
+ if ( (1UL << domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)) <= mfn )
+ {
+ struct page_info *new_page;
+ void *sp, *dp;
+
+ new_page = alloc_domheap_pages(
+ NULL, 0,
+ MEMF_bits(domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)));
+ if ( new_page == NULL )
+ goto unlock_and_copyback;
+
+ sp = map_domain_page(mfn);
+ dp = map_domain_page(page_to_mfn(new_page));
+ memcpy(dp, sp, PAGE_SIZE);
+ unmap_domain_page(dp);
+ unmap_domain_page(sp);
+
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
+ page = new_page;
+ }
+
spin_lock(&e->page_alloc_lock);
/*
goto fail_early;
}
- if ( (exch.out.address_bits != 0) &&
- (exch.out.address_bits <
- (get_order_from_pages(max_page) + PAGE_SHIFT)) )
- {
- if ( exch.out.address_bits <= PAGE_SHIFT )
- {
- rc = -ENOMEM;
- goto fail_early;
- }
- memflags = MEMF_bits(exch.out.address_bits);
- }
-
if ( exch.in.extent_order <= exch.out.extent_order )
{
in_chunk_order = exch.out.extent_order - exch.in.extent_order;
}
d = current->domain;
+ memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+ d, exch.out.address_bits ? : BITS_PER_LONG));
+
cpu = select_local_cpu(d);
for ( i = (exch.nr_exchanged >> in_chunk_order);
ASSERT(!in_irq());
- if ( bits )
- {
- bits = domain_clamp_alloc_bitsize(d, bits);
- if ( bits <= (PAGE_SHIFT + 1) )
- return NULL;
- bits -= PAGE_SHIFT + 1;
- if ( bits < zone_hi )
- zone_hi = bits;
- }
+ bits = domain_clamp_alloc_bitsize(d, bits ? : BITS_PER_LONG);
+ if ( bits <= (PAGE_SHIFT + 1) )
+ return NULL;
+
+ bits -= PAGE_SHIFT + 1;
+ if ( bits < zone_hi )
+ zone_hi = bits;
if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
{