goto fail;
clear_page(d->shared_info);
- share_xen_page_with_guest(
- virt_to_page(d->shared_info), d, XENSHARE_writable);
+ share_xen_page_with_guest(virt_to_page(d->shared_info), d, SHARE_rw);
switch ( config->config.gic_version )
{
return gfn_x(d->arch.p2m.max_mapped_gfn);
}
-void share_xen_page_with_guest(struct page_info *page,
- struct domain *d, int readonly)
+void share_xen_page_with_guest(struct page_info *page, struct domain *d,
+ enum XENSHARE_flags flags)
{
if ( page_get_owner(page) == d )
return;
spin_lock(&d->page_alloc_lock);
/* The incremented type count pins as writable or read-only. */
- page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page) | 1;
+ page->u.inuse.type_info =
+ (flags == SHARE_ro ? PGT_none : PGT_writable_page) | 1;
page_set_owner(page, d);
smp_wmb(); /* install valid domain ptr before updating refcnt. */
spin_unlock(&d->page_alloc_lock);
}
-void share_xen_page_with_privileged_guests(
- struct page_info *page, int readonly)
-{
- share_xen_page_with_guest(page, dom_xen, readonly);
-}
-
int xenmem_add_to_physmap_one(
struct domain *d,
unsigned int space,
goto fail;
clear_page(d->shared_info);
- share_xen_page_with_guest(
- virt_to_page(d->shared_info), d, XENSHARE_writable);
+ share_xen_page_with_guest(virt_to_page(d->shared_info), d, SHARE_rw);
if ( (rc = init_domain_irq_mapping(d)) != 0 )
goto fail;
continue;
}
- share_xen_page_with_guest(pg, d, XENSHARE_writable);
+ share_xen_page_with_guest(pg, d, SHARE_rw);
rc = guest_physmap_add_entry(d, _gfn(mfn), _mfn(mfn), 0, p2m_ram_rw);
if ( rc )
printk("Unable to add mfn %#lx to p2m: %d\n", mfn, rc);
return -ENOMEM;
mfn = page_to_mfn(pg);
clear_domain_page(_mfn(mfn));
- share_xen_page_with_guest(pg, d, XENSHARE_writable);
+ share_xen_page_with_guest(pg, d, SHARE_rw);
d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
i < (pvh_boot ? (1 + PFN_UP(trampoline_end - trampoline_start))
: 0x100);
i++ )
- share_xen_page_with_guest(mfn_to_page(_mfn(i)),
- dom_io, XENSHARE_writable);
+ share_xen_page_with_guest(mfn_to_page(_mfn(i)), dom_io, SHARE_rw);
/* Any areas not specified as RAM by the e820 map are considered I/O. */
for ( i = 0, pfn = 0; pfn < max_page; i++ )
{
if ( !mfn_valid(_mfn(pfn)) )
continue;
- share_xen_page_with_guest(
- mfn_to_page(_mfn(pfn)), dom_io, XENSHARE_writable);
+
+ share_xen_page_with_guest(mfn_to_page(_mfn(pfn)), dom_io, SHARE_rw);
}
/* Skip the RAM region. */
return (arch_get_max_pfn(d) ?: 1) - 1;
}
-void share_xen_page_with_guest(
- struct page_info *page, struct domain *d, int readonly)
+void share_xen_page_with_guest(struct page_info *page, struct domain *d,
+ enum XENSHARE_flags flags)
{
if ( page_get_owner(page) == d )
return;
spin_lock(&d->page_alloc_lock);
/* The incremented type count pins as writable or read-only. */
- page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
+ page->u.inuse.type_info =
+ (flags == SHARE_ro ? PGT_none : PGT_writable_page);
page->u.inuse.type_info |= PGT_validated | 1;
page_set_owner(page, d);
return 0;
}
-void share_xen_page_with_privileged_guests(
- struct page_info *page, int readonly)
-{
- share_xen_page_with_guest(page, dom_xen, readonly);
-}
-
void free_shared_domheap_page(struct page_info *page)
{
if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
(si) = param; \
if ( va ) \
{ \
- share_xen_page_with_guest(mfn_to_page(_mfn(param)), d, \
- XENSHARE_writable); \
+ share_xen_page_with_guest(mfn_to_page(_mfn(param)), d, SHARE_rw); \
replace_va_mapping(d, l4start, va, _mfn(param)); \
dom0_update_physmap(d, PFN_DOWN((va) - va_start), param, vphysmap); \
} \
clear_page(page);
console_mfn = virt_to_mfn(page);
si->console.domU.mfn = mfn_x(console_mfn);
- share_xen_page_with_guest(mfn_to_page(console_mfn), d,
- XENSHARE_writable);
+ share_xen_page_with_guest(mfn_to_page(console_mfn), d, SHARE_rw);
replace_va_mapping(d, l4start, console_va, console_mfn);
dom0_update_physmap(d, (console_va - va_start) >> PAGE_SHIFT,
mfn_x(console_mfn), vphysmap);
{
struct page_info *page = mfn_to_page(m2p_start_mfn + i);
if (hotadd_mem_valid(m2p_start_mfn + i, info))
- share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+ share_xen_page_with_privileged_guests(page, SHARE_ro);
}
}
{
struct page_info *page = mfn_to_page(m2p_start_mfn + i);
if (hotadd_mem_valid(m2p_start_mfn + i, info))
- share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+ share_xen_page_with_privileged_guests(page, SHARE_ro);
}
}
return 0;
}
for ( i = 0; i < n; i++ )
- {
- struct page_info *page = mfn_to_page(m2p_start_mfn + i);
- share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
- }
+ share_xen_page_with_privileged_guests(
+ mfn_to_page(m2p_start_mfn + i), SHARE_ro);
}
for ( v = RDWR_COMPAT_MPT_VIRT_START;
m2p_start_mfn = l2e_get_pfn(l2e);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
- {
- struct page_info *page = mfn_to_page(m2p_start_mfn + i);
- share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
- }
+ share_xen_page_with_privileged_guests(
+ mfn_to_page(m2p_start_mfn + i), SHARE_ro);
}
/* Mark all of direct map NX if hardware supports it. */
for_each_online_cpu(cpu)
{
struct t_buf *buf;
- struct page_info *pg;
spin_lock_init(&per_cpu(t_lock, cpu));
/* Now share the trace pages */
for ( i = 0; i < pages; i++ )
- {
- pg = mfn_to_page(t_info_mfn_list[offset + i]);
- share_xen_page_with_privileged_guests(pg, XENSHARE_writable);
- }
+ share_xen_page_with_privileged_guests(
+ mfn_to_page(t_info_mfn_list[offset + i]), SHARE_rw);
}
/* Finally, share the t_info page */
for(i = 0; i < t_info_pages; i++)
share_xen_page_with_privileged_guests(
- virt_to_page(t_info) + i, XENSHARE_readonly);
+ virt_to_page(t_info) + i, SHARE_ro);
data_size = (pages * PAGE_SIZE - sizeof(struct t_buf));
t_buf_highwater = data_size >> 1; /* 50% high water */
}
for ( i = 0; i < npages; i++ )
- share_xen_page_with_guest(mfn_to_page(mfn_add(mfn, i)),
- d, XENSHARE_writable);
+ share_xen_page_with_guest(mfn_to_page(mfn_add(mfn, i)), d, SHARE_rw);
return 0;
}
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
- virt_to_page((char *)(t)->shared_raw[i]), \
- (d), XENSHARE_writable); \
+ virt_to_page((char *)(t)->shared_raw[i]), (d), SHARE_rw); \
} while ( 0 )
#define gnttab_shared_gmfn(d, t, i) \
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
-#define XENSHARE_writable 0
-#define XENSHARE_readonly 1
-extern void share_xen_page_with_guest(
- struct page_info *page, struct domain *d, int readonly);
-extern void share_xen_page_with_privileged_guests(
- struct page_info *page, int readonly);
-
#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
- virt_to_page((char *)(t)->shared_raw[i]), \
- (d), XENSHARE_writable); \
+ virt_to_page((char *)(t)->shared_raw[i]), (d), SHARE_rw); \
} while ( 0 )
#define gnttab_create_status_page(d, t, i) \
do { \
share_xen_page_with_guest( \
- virt_to_page((char *)(t)->status[i]), \
- (d), XENSHARE_writable); \
+ virt_to_page((char *)(t)->status[i]), (d), SHARE_rw); \
} while ( 0 )
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
-#define XENSHARE_writable 0
-#define XENSHARE_readonly 1
-extern void share_xen_page_with_guest(
- struct page_info *page, struct domain *d, int readonly);
-extern int unshare_xen_page_with_guest(struct page_info *page,
- struct domain *d);
-extern void share_xen_page_with_privileged_guests(
- struct page_info *page, int readonly);
extern void free_shared_domheap_page(struct page_info *page);
#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
}
}
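+/*
+ * Permission that Xen grants when sharing one of its own pages with a
+ * guest: writable (SHARE_rw) or read-only (SHARE_ro).
+ */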
+enum XENSHARE_flags {
+ SHARE_rw,
+ SHARE_ro,
+};
+void share_xen_page_with_guest(struct page_info *page, struct domain *d,
+ enum XENSHARE_flags flags);
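+/* Undo share_xen_page_with_guest() for a page previously shared with d. */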
+int unshare_xen_page_with_guest(struct page_info *page, struct domain *d);
+
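+/*
+ * Share a Xen-owned page with all privileged guests by assigning it to
+ * dom_xen.
+ */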
+static inline void share_xen_page_with_privileged_guests(
+ struct page_info *page, enum XENSHARE_flags flags)
+{
+ share_xen_page_with_guest(page, dom_xen, flags);
+}
+
#endif /* __XEN_MM_H__ */