copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64 Xen.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 16548:cd5e1e76d0bc66440a04122baa27860f5d763b5b
xen-unstable date: Thu Dec 06 13:39:19 2007 +0000
32-on-64: Fixes to previous changeset.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 16549:baf90ee3c1dab65f4a386521be6e108920622866
xen-unstable date: Thu Dec 06 13:56:00 2007 +0000
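Rather than clipping x86/64 Xen's total memory so that every machine frame
stays addressable by 32-on-64 guests, allocations made on behalf of such a
domain are now clamped to its addressable range via
domain_clamp_alloc_bitsize() and MEMF_bits(), and gnttab_transfer() copies a
page the destination cannot address into a freshly allocated reachable page
before transferring it. The copy-on-transfer fallback can be summarized by
the sketch below; the helper name make_transferable() is illustrative only,
and the real hunk in grant_table.c additionally handles reference counts,
locking and the GNTST_address_too_big failure path.

/* Illustrative condensation of the gnttab_transfer() change below. */
static struct page_info *make_transferable(struct domain *e,
                                           struct page_info *page,
                                           unsigned long mfn)
{
    /* Widest physical address (in bits) the destination domain can reach. */
    unsigned int max_bitsize =
        domain_clamp_alloc_bitsize(e, BITS_PER_LONG + PAGE_SHIFT - 1);
    struct page_info *new_page;
    void *sp, *dp;

    /* MFNs below 2^(max_bitsize - PAGE_SHIFT) need no copy. */
    if ( (1UL << (max_bitsize - PAGE_SHIFT)) > mfn )
        return page;

    /* Allocate a replacement page within the reachable range. */
    new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bitsize));
    if ( new_page == NULL )
        return NULL;   /* caller fails the transfer: GNTST_address_too_big */

    /* Copy the contents and hand over the copy instead. */
    sp = map_domain_page(mfn);
    dp = map_domain_page(page_to_mfn(new_page));
    memcpy(dp, sp, PAGE_SIZE);
    unmap_domain_page(dp);
    unmap_domain_page(sp);

    free_domheap_page(page);
    return new_page;
}

Transfers of such pages are expected to be rare, so paying for a page copy in
that case is cheaper than limiting the whole host to memory those guests can
address, which is what previously imposed the 166GB cap on x86/64 Xen.
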
release_compat_l4(d->vcpu[vcpuid]);
}
- d->arch.physaddr_bitsize = 64;
-
return 0;
}
#ifdef CONFIG_COMPAT
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
- d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+ d->arch.physaddr_bitsize =
fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ (PAGE_SHIFT - 2);
if ( value > (!is_pv_32on64_domain(d) ?
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
{
- if ( d == NULL )
+ if ( (d == NULL) || !is_pv_32on64_domain(d) )
return bits;
return min(d->arch.physaddr_bitsize, bits);
}
grant_entry_t *sha;
struct gnttab_transfer gop;
unsigned long mfn;
+ unsigned int max_bitsize;
for ( i = 0; i < count; i++ )
{
goto copyback;
}
+ max_bitsize = domain_clamp_alloc_bitsize(
+ e, BITS_PER_LONG+PAGE_SHIFT-1);
+ if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
+ {
+ struct page_info *new_page;
+ void *sp, *dp;
+
+ new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bitsize));
+ if ( new_page == NULL )
+ {
+ rcu_unlock_domain(e);
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
+ gop.status = GNTST_address_too_big;
+ goto copyback;
+ }
+
+ sp = map_domain_page(mfn);
+ dp = map_domain_page(page_to_mfn(new_page));
+ memcpy(dp, sp, PAGE_SIZE);
+ unmap_domain_page(dp);
+ unmap_domain_page(sp);
+
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
+ page = new_page;
+ }
+
spin_lock(&e->page_alloc_lock);
/*
goto fail_early;
}
- if ( (exch.out.address_bits != 0) &&
- (exch.out.address_bits <
- (get_order_from_pages(max_page) + PAGE_SHIFT)) )
- {
- if ( exch.out.address_bits <= PAGE_SHIFT )
- {
- rc = -ENOMEM;
- goto fail_early;
- }
- memflags = MEMF_bits(exch.out.address_bits);
- }
-
if ( exch.in.extent_order <= exch.out.extent_order )
{
in_chunk_order = exch.out.extent_order - exch.in.extent_order;
}
d = current->domain;
+ memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+ d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
+
cpu = select_local_cpu(d);
for ( i = (exch.nr_exchanged >> in_chunk_order);
ASSERT(!in_irq());
- if ( bits )
- {
- bits = domain_clamp_alloc_bitsize(d, bits);
- if ( bits <= (PAGE_SHIFT + 1) )
- return NULL;
- bits -= PAGE_SHIFT + 1;
- if ( bits < zone_hi )
- zone_hi = bits;
- }
+ bits = domain_clamp_alloc_bitsize(d, bits ? : (BITS_PER_LONG+PAGE_SHIFT));
+ if ( bits <= (PAGE_SHIFT + 1) )
+ return NULL;
+
+ bits -= PAGE_SHIFT + 1;
+ if ( bits < zone_hi )
+ zone_hi = bits;
if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
{
#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
-#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
+#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTTABOP_error_msgs { \
"okay", \
"no spare translation slot in the I/O MMU", \
"permission denied", \
"bad page", \
- "copy arguments cross page boundary" \
+ "copy arguments cross page boundary", \
+ "page address size too large" \
}
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */