#include <xen/paging.h>
#include <xen/keyhandler.h>
#include <xen/radix-tree.h>
-#include <xen/vmap.h>
+#include <xen/xvmalloc.h>
#include <xen/nospec.h>
#include <xsm/xsm.h>
#include <asm/flushtlb.h>
return -EINVAL;
}
+ /* No need for physically contiguous memory here - use xvzalloc(). */
- if ( (gt = xzalloc(struct grant_table)) == NULL )
+ if ( (gt = xvzalloc(struct grant_table)) == NULL )
return -ENOMEM;
/* Simple stuff. */
d->grant_table = gt;
/* Active grant table. */
- gt->active = xzalloc_array(struct active_grant_entry *,
- max_nr_active_grant_frames(gt));
+ gt->active = xvzalloc_array(struct active_grant_entry *,
+ max_nr_active_grant_frames(gt));
if ( gt->active == NULL )
goto out;
/* Tracking of mapped foreign frames table */
if ( gt->max_maptrack_frames )
{
+ /* Typed xvzalloc_array() adds the overflow check the open-coded multiply lacked. */
- gt->maptrack = vzalloc(gt->max_maptrack_frames * sizeof(*gt->maptrack));
+ gt->maptrack = xvzalloc_array(struct grant_mapping *,
+ gt->max_maptrack_frames);
if ( gt->maptrack == NULL )
goto out;
}
/* Shared grant table. */
- gt->shared_raw = xvzalloc_array(void *, gt->max_grant_frames);
+ gt->shared_raw = xvzalloc_array(void *, gt->max_grant_frames)
if ( gt->shared_raw == NULL )
goto out;
/* Status pages for grant table - for version 2 */
- gt->status = xzalloc_array(grant_status_t *,
- grant_to_status_frames(gt->max_grant_frames));
+ gt->status = xvzalloc_array(grant_status_t *,
+ grant_to_status_frames(gt->max_grant_frames))
if ( gt->status == NULL )
goto out;
if ( t == NULL )
return;
+ /*
+ * Clear the domain's pointer up front (previously done last), so there is
+ * no window in which d->grant_table refers to a partially freed table.
+ */
+ d->grant_table = NULL;
+
for ( i = 0; i < nr_grant_frames(t); i++ )
free_xenheap_page(t->shared_raw[i]);
+ /* All table arrays now come from xvmalloc et al - free with xvfree(). */
- xfree(t->shared_raw);
+ xvfree(t->shared_raw);
ASSERT(!t->maptrack_limit);
- vfree(t->maptrack);
+ xvfree(t->maptrack);
for ( i = 0; i < nr_active_grant_frames(t); i++ )
free_xenheap_page(t->active[i]);
- xfree(t->active);
+ xvfree(t->active);
for ( i = 0; i < nr_status_frames(t); i++ )
free_xenheap_page(t->status[i]);
- xfree(t->status);
+ xvfree(t->status);
- xfree(t);
- d->grant_table = NULL;
+ xvfree(t);
}
void grant_table_init_vcpu(struct vcpu *v)
#include <xen/spinlock.h>
#include <xen/types.h>
#include <xen/vmap.h>
+#include <xen/xvmalloc.h>
#include <asm/page.h>
static DEFINE_SPINLOCK(vm_lock);
return p;
}
+/*
+ * Common tail of vfree() / xvfree(): collect the backing pages of a vmapped
+ * range, unmap it, then free the pages. Callers have already rejected NULL
+ * and computed the page count, hence the bare ASSERT(pages) here.
+ */
-void vfree(void *va)
+static void _vfree(const void *va, unsigned int pages)
{
- unsigned int i, pages;
+ unsigned int i;
struct page_info *pg;
PAGE_LIST_HEAD(pg_list);
- if ( !va )
- return;
-
- pages = vmap_size(va);
ASSERT(pages);
for ( i = 0; i < pages; i++ )
{
- struct page_info *page = vmap_to_page(va + i * PAGE_SIZE);
-
- ASSERT(page);
- page_list_add(page, &pg_list);
+ pg = vmap_to_page(va + i * PAGE_SIZE);
+ ASSERT(pg);
+ page_list_add(pg, &pg_list);
}
+ /* Pages are gathered first so the mapping can be torn down in one go. */
vunmap(va);
while ( (pg = page_list_remove_head(&pg_list)) != NULL )
free_domheap_page(pg);
}
+
+/* Public vfree(): retains the old NULL-tolerant interface. */
+void vfree(void *va)
+{
+ if ( !va )
+ return;
+
+ _vfree(va, vmap_size(va));
+}
+
+/*
+ * Free an allocation made by xvmalloc() et al. vm_size() identifies
+ * vmap-backed addresses; anything else is handed to xfree().
+ * NOTE(review): assumes vm_size() returns 0 for NULL and for non-vmap
+ * addresses - xfree(NULL) is then the usual no-op.
+ */
+void xvfree(void *va)
+{
+ unsigned int pages = vm_size(va, VMAP_DEFAULT);
+
+ if ( pages )
+ _vfree(va, pages);
+ else
+ xfree(va);
+}
+
+/*
+ * Hybrid allocation: requests up to a page are served from the xmalloc()
+ * pool, larger ones via vmalloc() (no physical contiguity). Alignment
+ * beyond PAGE_SIZE cannot be expressed here, hence the ASSERT.
+ */
+void *_xvmalloc(size_t size, unsigned int align)
+{
+ ASSERT(align <= PAGE_SIZE);
+ return size <= PAGE_SIZE ? _xmalloc(size, align) : vmalloc(size);
+}
+
+/* Zeroing variant of _xvmalloc(). */
+void *_xvzalloc(size_t size, unsigned int align)
+{
+ ASSERT(align <= PAGE_SIZE);
+ return size <= PAGE_SIZE ? _xzalloc(size, align) : vzalloc(size);
+}
+
+/*
+ * realloc() counterpart for xvmalloc(): migrates data between the xmalloc
+ * and vmalloc domains as the (rounded up) size crosses PAGE_SIZE. On
+ * failure the original allocation is left intact where possible.
+ */
+void *_xvrealloc(void *va, size_t size, unsigned int align)
+{
+ size_t pages = vm_size(va, VMAP_DEFAULT);
+ void *ptr;
+
+ ASSERT(align <= PAGE_SIZE);
+
+ /* pages == 0: old block (if any) came from the xmalloc() pool. */
+ if ( !pages )
+ {
+ if ( size <= PAGE_SIZE )
+ return _xrealloc(va, size, align);
+
+ ptr = vmalloc(size);
+ /* NOTE(review): ZERO_BLOCK_PTR presumably marks a zero-size xmalloc - nothing to copy/free. */
+ if ( ptr && va && va != ZERO_BLOCK_PTR )
+ {
+ /*
+ * xmalloc-based allocations up to PAGE_SIZE don't cross page
+ * boundaries. Therefore, without needing to know the exact
+ * prior allocation size, simply copy the entire tail of the
+ * page containing the earlier allocation.
+ */
+ memcpy(ptr, va, PAGE_SIZE - PAGE_OFFSET(va));
+ xfree(va);
+ }
+ }
+ /* Same number of backing pages - the existing vmap area can be reused. */
+ else if ( pages == PFN_UP(size) )
+ ptr = va;
+ else
+ {
+ ptr = _xvmalloc(size, align);
+ if ( ptr )
+ {
+ memcpy(ptr, va, min(size, pages << PAGE_SHIFT));
+ vfree(va);
+ }
+ /* Failed shrink: the old, larger allocation still satisfies the caller. */
+ else if ( pages > PFN_UP(size) )
+ ptr = va;
+ }
+
+ return ptr;
+}
#endif
/*
* Xen malloc/free-style interface.
+ *
+ * NOTE: Unless physically contiguous memory space is required, the interfaces
+ * in xvmalloc.h are to be used in preference to the ones here.
*/
/* Allocate space for typed object. */
--- /dev/null
+#ifndef XEN__XVMALLOC_H
+#define XEN__XVMALLOC_H
+
+#include <xen/types.h>
+
+/*
+ * Xen malloc/free-style interface, as long as there's no need to have
+ * physically contiguous memory allocated. These should be used in preference
+ * to xmalloc() et al.
+ */
+
+/* Allocate space for typed object. */
+#define xvmalloc(_type) ((_type *)_xvmalloc(sizeof(_type), __alignof__(_type)))
+#define xvzalloc(_type) ((_type *)_xvzalloc(sizeof(_type), __alignof__(_type)))
+
+/*
+ * Allocate space for a typed object and copy an existing instance.
+ * Returns NULL (copying nothing) if the allocation fails.
+ */
+#define xvmemdup(ptr) \
+({ \
+ void *p_ = _xvmalloc(sizeof(*(ptr)), __alignof__(*(ptr))); \
+ if ( p_ ) \
+ memcpy(p_, ptr, sizeof(*(ptr))); \
+ (typeof(*(ptr)) *)p_; \
+})
+
+/* Allocate space for array of typed objects. */
+#define xvmalloc_array(_type, _num) \
+ ((_type *)_xvmalloc_array(sizeof(_type), __alignof__(_type), _num))
+#define xvzalloc_array(_type, _num) \
+ ((_type *)_xvzalloc_array(sizeof(_type), __alignof__(_type), _num))
+
+/* Allocate space for a structure with a flexible array of typed objects. */
+#define xvzalloc_flex_struct(type, field, nr) \
+ ((type *)_xvzalloc(offsetof(type, field[nr]), __alignof__(type)))
+
+#define xvmalloc_flex_struct(type, field, nr) \
+ ((type *)_xvmalloc(offsetof(type, field[nr]), __alignof__(type)))
+
+/* Re-allocate space for a structure with a flexible array of typed objects. */
+#define xvrealloc_flex_struct(ptr, field, nr) \
+ ((typeof(ptr))_xvrealloc(ptr, offsetof(typeof(*(ptr)), field[nr]), \
+ __alignof__(typeof(*(ptr)))))
+
+/* Free any of the above. */
+void xvfree(void *);
+
+/* Free an allocation, and zero the pointer to it. */
+#define XVFREE(p) do { \
+ xvfree(p); \
+ (p) = NULL; \
+} while ( false )
+
+/* Underlying functions */
+void *_xvmalloc(size_t size, unsigned int align);
+void *_xvzalloc(size_t size, unsigned int align);
+void *_xvrealloc(void *ptr, size_t size, unsigned int align);
+
+/*
+ * NOTE(review): the UINT_MAX bound below looks meant to mirror xmalloc.h's
+ * array helpers - confirm against _xmalloc_array() when touching either.
+ */
+static inline void *_xvmalloc_array(
+ size_t size, unsigned int align, unsigned long num)
+{
+ /* Check for overflow. */
+ if ( size && num > UINT_MAX / size )
+ return NULL;
+ return _xvmalloc(size * num, align);
+}
+
+static inline void *_xvzalloc_array(
+ size_t size, unsigned int align, unsigned long num)
+{
+ /* Check for overflow. */
+ if ( size && num > UINT_MAX / size )
+ return NULL;
+ return _xvzalloc(size * num, align);
+}
+
+#endif /* XEN__XVMALLOC_H */