unsigned int mode,
xc_shadow_op_stats_t *stats);
+int xc_get_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t *size);
+int xc_set_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t size);
+
int xc_sched_credit_domain_set(xc_interface *xch,
uint32_t domid,
struct xen_domctl_sched_credit *sdom);
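The two new wrappers are thin domctl shims in the usual libxc pattern. Below is a minimal usage sketch of how a toolstack caller might drive them; it is not part of this patch, and the domid handling, the 64 MiB target and the error handling are illustrative assumptions only.

#include <stdio.h>
#include <inttypes.h>
#include <xenctrl.h>

/* Illustrative only: query the current pool size, then grow it to 64 MiB. */
static int example_resize_paging_pool(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint64_t size;
    int rc;

    if ( !xch )
        return -1;

    rc = xc_get_paging_mempool_size(xch, domid, &size);
    if ( rc )
        goto out;

    printf("dom%"PRIu32" paging pool: %"PRIu64" bytes\n", domid, size);

    /* 64 MiB chosen purely for illustration; the size must be page-aligned. */
    rc = xc_set_paging_mempool_size(xch, domid, UINT64_C(64) << 20);

 out:
    xc_interface_close(xch);
    return rc;
}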
return (rc == 0) ? domctl.u.shadow_op.pages : rc;
}
+int xc_get_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t *size)
+{
+ int rc;
+ struct xen_domctl domctl = {
+ .cmd = XEN_DOMCTL_get_paging_mempool_size,
+ .domain = domid,
+ };
+
+ rc = do_domctl(xch, &domctl);
+ if ( rc )
+ return rc;
+
+ *size = domctl.u.paging_mempool.size;
+ return 0;
+}
+
+int xc_set_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t size)
+{
+ struct xen_domctl domctl = {
+ .cmd = XEN_DOMCTL_set_paging_mempool_size,
+ .domain = domid,
+ .u.paging_mempool = {
+ .size = size,
+ },
+ };
+
+ return do_domctl(xch, &domctl);
+}
+
int xc_domain_setmaxmem(xc_interface *xch,
uint32_t domid,
uint64_t max_memkb)
return ROUNDUP(nr_pages, 1 << (20 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
}
+/* Return the size of the pool, in bytes. */
+int arch_get_paging_mempool_size(struct domain *d, uint64_t *size)
+{
+ *size = (uint64_t)ACCESS_ONCE(d->arch.paging.p2m_total_pages) << PAGE_SHIFT;
+ return 0;
+}
+
/*
* Set the pool of pages to the required number of pages.
* Returns 0 for success, non-zero for failure.
return 0;
}
+int arch_set_paging_mempool_size(struct domain *d, uint64_t size)
+{
+ unsigned long pages = size >> PAGE_SHIFT;
+ bool preempted = false;
+ int rc;
+
+ if ( (size & ~PAGE_MASK) || /* Non page-sized request? */
+ pages != (size >> PAGE_SHIFT) ) /* 32-bit overflow? */
+ return -EINVAL;
+
+ spin_lock(&d->arch.paging.lock);
+ rc = p2m_set_allocation(d, pages, &preempted);
+ spin_unlock(&d->arch.paging.lock);
+
+ ASSERT(preempted == (rc == -ERESTART));
+
+ return rc;
+}
+
int p2m_teardown_allocation(struct domain *d)
{
int ret = 0;
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
unsigned int hap_get_allocation(struct domain *d);
+int hap_get_allocation_bytes(struct domain *d, uint64_t *size);
#endif /* XEN_HAP_H */
int shadow_set_allocation(struct domain *d, unsigned int pages,
bool *preempted);
+int shadow_get_allocation_bytes(struct domain *d, uint64_t *size);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_vcpu_teardown(v) ASSERT(is_pv_vcpu(v))
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
#define shadow_set_allocation(d, pages, preempted) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+#define shadow_get_allocation_bytes(d, size) \
+ ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
int fast, int all) {}
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
+int hap_get_allocation_bytes(struct domain *d, uint64_t *size)
+{
+ unsigned long pages = d->arch.paging.hap.total_pages;
+
+ pages += d->arch.paging.hap.p2m_pages;
+
+ *size = pages << PAGE_SHIFT;
+
+ return 0;
+}
+
/* Set the pool of pages to the required number of pages.
* Returns 0 for success, non-zero for failure. */
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
}
#endif
+int arch_get_paging_mempool_size(struct domain *d, uint64_t *size)
+{
+ int rc;
+
+ if ( is_pv_domain(d) ) /* TODO: Relax in due course */
+ return -EOPNOTSUPP;
+
+ if ( hap_enabled(d) )
+ rc = hap_get_allocation_bytes(d, size);
+ else
+ rc = shadow_get_allocation_bytes(d, size);
+
+ return rc;
+}
+
+int arch_set_paging_mempool_size(struct domain *d, uint64_t size)
+{
+ unsigned long pages = size >> PAGE_SHIFT;
+ bool preempted = false;
+ int rc;
+
+ if ( is_pv_domain(d) ) /* TODO: Relax in due course */
+ return -EOPNOTSUPP;
+
+ if ( size & ~PAGE_MASK || /* Non page-sized request? */
+ pages != (unsigned int)pages ) /* Overflow $X_set_allocation()? */
+ return -EINVAL;
+
+ paging_lock(d);
+ if ( hap_enabled(d) )
+ rc = hap_set_allocation(d, pages, &preempted);
+ else
+ rc = shadow_set_allocation(d, pages, &preempted);
+ paging_unlock(d);
+
+ /*
+ * TODO: Adjust $X_set_allocation() so this is true.
+ ASSERT(preempted == (rc == -ERESTART));
+ */
+
+ return preempted ? -ERESTART : rc;
+}
+
/*
* Local variables:
* mode: C
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
+int shadow_get_allocation_bytes(struct domain *d, uint64_t *size)
+{
+ unsigned long pages = d->arch.paging.shadow.total_pages;
+
+ pages += d->arch.paging.shadow.p2m_pages;
+
+ *size = pages << PAGE_SHIFT;
+
+ return 0;
+}
+
/**************************************************************************/
/* Hash table for storing the guest->shadow mappings.
* The table itself is an array of pointers to shadows; the shadows are then
ret = iommu_do_domctl(op, d, u_domctl);
break;
+ case XEN_DOMCTL_get_paging_mempool_size:
+ ret = arch_get_paging_mempool_size(d, &op->u.paging_mempool.size);
+ if ( !ret )
+ copyback = 1;
+ break;
+
+ case XEN_DOMCTL_set_paging_mempool_size:
+ ret = arch_set_paging_mempool_size(d, op->u.paging_mempool.size);
+
+ if ( ret == -ERESTART )
+ ret = hypercall_create_continuation(
+ __HYPERVISOR_domctl, "h", u_domctl);
+ break;
+
default:
ret = arch_do_domctl(op, d, u_domctl);
break;
/* Return the bitmap but do not modify internal copy. */
#define XEN_DOMCTL_SHADOW_OP_PEEK 12
-/* Memory allocation accessors. */
+/*
+ * Memory allocation accessors. These APIs are broken and will be removed.
+ * Use XEN_DOMCTL_{get,set}_paging_mempool_size instead.
+ */
#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
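Migration note (not part of this patch): the deprecated SET_ALLOCATION accessor expresses the pool size in MiB via a 32-bit field, whereas the replacement domctl takes a 64-bit byte count, so existing values translate with a simple shift. A hedged sketch, with a hypothetical helper name:

/* Hypothetical conversion helper: old accessors use MiB (32-bit),
 * the new domctls use bytes (64-bit). */
static inline uint64_t example_shadow_mb_to_mempool_bytes(uint32_t mb)
{
    return (uint64_t)mb << 20;
}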
xen_pfn_t start_pfn, nr_pfns;
};
+/*
+ * XEN_DOMCTL_get_paging_mempool_size / XEN_DOMCTL_set_paging_mempool_size.
+ *
+ * Get or set the paging memory pool size. The size is in bytes.
+ *
+ * This is a dedicated pool of memory for Xen to use while managing the guest,
+ * typically containing pagetables. As such, there is an
+ * implementation-specific minimum granularity.
+ *
+ * The set operation can fail mid-way through the request (e.g. Xen running
+ * out of memory, no free memory to reclaim from the pool, etc.).
+ */
+struct xen_domctl_paging_mempool {
+ uint64_aligned_t size; /* Size in bytes. */
+};
+
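Since the interface advertises an implementation-specific minimum granularity, callers should not assume byte-level precision. Below is a minimal sketch of the defensive rounding a caller might apply before the set operation; the 4 KiB granularity and the helper name are assumptions for illustration, and the effective granularity is best observed by reading the size back afterwards.

/* Hypothetical caller-side helper: round an arbitrary byte count up to an
 * assumed 4 KiB page granularity, as non page-sized requests are rejected
 * with -EINVAL by arch_set_paging_mempool_size(). */
#define EXAMPLE_POOL_GRANULARITY 4096ULL

static inline uint64_t example_round_pool_size(uint64_t bytes)
{
    return (bytes + EXAMPLE_POOL_GRANULARITY - 1) &
           ~(EXAMPLE_POOL_GRANULARITY - 1);
}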
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_vcpu_msr {
uint32_t index;
#define XEN_DOMCTL_get_cpu_policy 82
#define XEN_DOMCTL_set_cpu_policy 83
#define XEN_DOMCTL_vmtrace_op 84
+#define XEN_DOMCTL_get_paging_mempool_size 85
+#define XEN_DOMCTL_set_paging_mempool_size 86
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
struct xen_domctl_psr_alloc psr_alloc;
struct xen_domctl_vuart_op vuart_op;
struct xen_domctl_vmtrace_op vmtrace_op;
+ struct xen_domctl_paging_mempool paging_mempool;
uint8_t pad[128];
} u;
};
int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+int arch_get_paging_mempool_size(struct domain *d, uint64_t *size /* bytes */);
+int arch_set_paging_mempool_size(struct domain *d, uint64_t size /* bytes */);
+
int domain_relinquish_resources(struct domain *d);
void dump_pageframe_info(struct domain *d);