#include <asm/setup.h>
#include <asm/bzimage.h> /* for bzimage_parse */
#include <asm/io_apic.h>
-#include <asm/hap.h>
#include <asm/hpet.h>
#include <public/version.h>
nr_pages);
}
- if ( is_pvh_domain(d) )
- hap_set_alloc_for_pvh_dom0(d, dom0_paging_pages(d, nr_pages));
-
/*
- * We enable paging mode again so guest_physmap_add_page will do the
- * right thing for us.
+ * We enable paging mode again so guest_physmap_add_page and
+ * paging_set_allocation will do the right thing for us.
*/
d->arch.paging.mode = save_pvh_pg_mode;
+ if ( is_pvh_domain(d) )
+ {
+ bool preempted;
+
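+ /*
+  * Size the paging pool for the PVH Dom0, retrying whenever the
+  * operation is preempted and processing pending softirqs between
+  * attempts to keep the rest of the system responsive.
+  */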
+ do {
+ preempted = false;
+ paging_set_allocation(d, dom0_paging_pages(d, nr_pages),
+ &preempted);
+ process_pending_softirqs();
+ } while ( preempted );
+ }
+
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
{
/* Set the pool of pages to the required number of pages.
* Returns 0 for success, non-zero for failure. */
-static int
-hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
+int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
{
struct page_info *pg;
/* Check to see if we need to yield and try again */
if ( preempted && general_preempt_check() )
{
- *preempted = 1;
+ *preempted = true;
return 0;
}
}
paging_unlock(d);
}
-void hap_teardown(struct domain *d, int *preempted)
+void hap_teardown(struct domain *d, bool *preempted)
{
struct vcpu *v;
mfn_t mfn;
int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
- int rc, preempted = 0;
+ int rc;
+ bool preempted = false;
switch ( sc->op )
{
}
}
-void __init hap_set_alloc_for_pvh_dom0(struct domain *d,
- unsigned long hap_pages)
-{
- int rc;
-
- paging_lock(d);
- rc = hap_set_allocation(d, hap_pages, NULL);
- paging_unlock(d);
-
- BUG_ON(rc);
-}
-
static const struct paging_mode hap_paging_real_mode;
static const struct paging_mode hap_paging_protected_mode;
static const struct paging_mode hap_paging_pae_mode;
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
{
- int rc, preempted = 0;
+ int rc;
+ bool preempted = false;
if ( hap_enabled(d) )
hap_teardown(d, &preempted);
safe_write_pte(p, new);
}
+int paging_set_allocation(struct domain *d, unsigned int pages,
+                          bool *preempted)
+{
+ int rc;
+
+ ASSERT(paging_mode_enabled(d));
+
+ paging_lock(d);
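+ /* Resize whichever pool (HAP or shadow) backs this domain's paging mode. */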
+ if ( hap_enabled(d) )
+ rc = hap_set_allocation(d, pages, preempted);
+ else
+ rc = sh_set_allocation(d, pages, preempted);
+ paging_unlock(d);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
paging_unlock(d);
}
-/* Set the pool of shadow pages to the required number of pages.
- * Input will be rounded up to at least shadow_min_acceptable_pages(),
- * plus space for the p2m table.
- * Returns 0 for success, non-zero for failure. */
-static int sh_set_allocation(struct domain *d,
- unsigned int pages,
- int *preempted)
+int sh_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
{
struct page_info *sp;
unsigned int lower_bound;
/* Check to see if we need to yield and try again */
if ( preempted && general_preempt_check() )
{
- *preempted = 1;
+ *preempted = true;
return 0;
}
}
return rv;
}
-void shadow_teardown(struct domain *d, int *preempted)
+void shadow_teardown(struct domain *d, bool *preempted)
/* Destroy the shadow pagetables of this domain and free its shadow memory.
* Should only be called for dying domains. */
{
xen_domctl_shadow_op_t *sc,
XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
- int rc, preempted = 0;
+ int rc;
+ bool preempted = false;
switch ( sc->op )
{
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
int hap_enable(struct domain *d, u32 mode);
void hap_final_teardown(struct domain *d);
-void hap_teardown(struct domain *d, int *preempted);
+void hap_teardown(struct domain *d, bool *preempted);
void hap_vcpu_init(struct vcpu *v);
int hap_track_dirty_vram(struct domain *d,
unsigned long begin_pfn,
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
-void hap_set_alloc_for_pvh_dom0(struct domain *d, unsigned long num_pages);
+int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
#endif /* XEN_HAP_H */
void paging_dump_domain_info(struct domain *d);
void paging_dump_vcpu_info(struct vcpu *v);
+/* Set the paging (HAP or shadow) pool to the required number of pages.
+ * The requested size might be rounded up to a minimum amount of pages,
+ * plus space for the p2m table.
+ * Returns 0 for success, non-zero for failure; if *preempted is set the
+ * caller should retry the call. */
+int paging_set_allocation(struct domain *d, unsigned int pages,
+ bool *preempted);
+
#endif /* XEN_PAGING_H */
/*
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
/* Call when destroying a domain */
-void shadow_teardown(struct domain *d, int *preempted);
+void shadow_teardown(struct domain *d, bool *preempted);
/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);
/* Discard _all_ mappings from the domain's shadows. */
void shadow_blow_tables_per_domain(struct domain *d);
+/* Set the pool of shadow pages to the required number of pages.
+ * Input will be rounded up to at least shadow_min_acceptable_pages(),
+ * plus space for the p2m table.
+ * Returns 0 for success, non-zero for failure. */
+int sh_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; })
#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
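+/* Without CONFIG_SHADOW_PAGING there is no shadow pool to size; reaching
+ * this stub would be a bug. */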
+#define sh_set_allocation(d, pages, preempted) \
+ ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
bool_t fast, bool_t all) {}