/*
 * Helpers for carving allocations out of the per-vCPU compat argument
 * translation page. `xlat_page_current` is the caller-owned cursor into
 * that page; xlat_malloc() (defined elsewhere) advances it.
 */
#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
#define xlat_page_size COMPAT_ARG_XLAT_SIZE
/* Bytes remaining in the xlat page from the cursor to the end.
 * Argument is parenthesized so expressions like (cur + n) subtract
 * correctly rather than binding as (... - cur) + n. */
#define xlat_page_left_size(xlat_page_current) \
    (xlat_page_start + xlat_page_size - (xlat_page_current))
/* Reset the cursor to the start of the xlat page. */
#define xlat_malloc_init(xlat_page_current) do { \
    (xlat_page_current) = xlat_page_start; \
} while (0)
extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size);
/* Allocate an array of _c elements of type _t from the xlat page,
 * advancing cursor _p. Arguments parenthesized against operator-
 * precedence surprises when callers pass expressions. */
#define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&(_p), \
                                                          sizeof(_t) * (_c)))
/*
* Valid if in +ve half of 48-bit address space, or above Xen-reserved area.