#include <xen/init.h>
#include <xen/lib.h>
-extern const struct device_desc _sdevice[], _edevice[];
-extern const struct acpi_device_desc _asdevice[], _aedevice[];
+typedef struct device_desc device_desc_t;
+DECLARE_BOUNDS(device_desc, _sdevice, _edevice);
+typedef struct acpi_device_desc acpi_device_desc_t;
+DECLARE_BOUNDS(acpi_device_desc, _asdevice, _aedevice);
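DECLARE_BOUNDS() itself is not shown in these hunks; the header hunk at the end starts including <xen/lib.h>, presumably to pull the macro in. As a rough idea only, here is one plausible sketch of what it could expand to, assuming it generates a typed <name>_diff() helper that routes the subtraction through uintptr_t so the compiler never sees pointer arithmetic between the two distinct linker symbols. This is a hypothetical illustration, not the definition used by the series; the standard headers are only there to make the snippet compile standalone.

/* Hypothetical sketch only, not the series' actual definition. */
#include <stddef.h>
#include <stdint.h>

#define DECLARE_BOUNDS(name, start, end)                                \
    extern const name ## _t start[], end[];                             \
    static inline ptrdiff_t name ## _diff(const name ## _t *s,          \
                                          const name ## _t *e)          \
    {                                                                   \
        /* Second argument minus first, in name_t-sized elements. */    \
        return (ptrdiff_t)(((uintptr_t)e - (uintptr_t)s) /              \
                           sizeof(name ## _t));                         \
    }

With diff(a, b) meaning b - a in elements of the named type, the rewrites below keep their original meaning: device_desc_diff(desc, _edevice) != 0 stands in for desc != _edevice, and init_diff(__init_begin, __init_end) stands in for __init_end - __init_begin.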
int __init device_init(struct dt_device_node *dev, enum device_class class,
const void *data)
if ( !dt_device_is_available(dev) || dt_device_for_passthrough(dev) )
return -ENODEV;
- for ( desc = _sdevice; desc != _edevice; desc++ )
+ for ( desc = _sdevice; device_desc_diff(desc, _edevice) != 0; desc++ )
{
if ( desc->class != class )
continue;
{
const struct acpi_device_desc *desc;
- for ( desc = _asdevice; desc != _aedevice; desc++ )
+ for ( desc = _asdevice;
+ acpi_device_desc_diff(desc, _aedevice) != 0;
+ desc++ )
{
if ( ( desc->class != class ) || ( desc->class_type != class_type ) )
continue;
ASSERT(dev != NULL);
- for ( desc = _sdevice; desc != _edevice; desc++ )
+ for ( desc = _sdevice; device_desc_diff(desc, _edevice) != 0; desc++ )
{
if ( dt_match_node(desc->dt_match, dev) )
return desc->class;
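For context (not part of this patch): _sdevice/_edevice bracket the descriptors that individual drivers emit into a dedicated linker section, roughly what Xen's DT_DEVICE_START/DT_DEVICE_END registration macros produce. The sketch below uses made-up driver names and a hypothetical compatible string; the section name and field layout follow current Xen but are incidental here.

/* Illustration only: the descriptor lands in the section that the linker
 * script wraps with _sdevice/_edevice, so device_init() can walk it. */
static const struct dt_device_match my_uart_dt_match[] __initconst =
{
    DT_MATCH_COMPATIBLE("vendor,my-uart"),
    { /* sentinel */ },
};

static int __init my_uart_init(struct dt_device_node *dev, const void *data)
{
    return 0; /* probe the device here */
}

static const struct device_desc my_uart_desc __used __section(".dev.info") =
{
    .name     = "MY-UART",
    .class    = DEVICE_SERIAL,
    .dt_match = my_uart_dt_match,
    .init     = my_uart_init,
};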
unsigned long max_page;
unsigned long total_pages;
-extern char __init_begin[], __init_end[];
+typedef char init_t;
+DECLARE_BOUNDS(init, __init_begin, __init_end);
/* Checking VA memory layout alignment. */
static inline void check_memory_layout_alignment_constraints(void) {
void free_init_memory(void)
{
paddr_t pa = virt_to_maddr(__init_begin);
- unsigned long len = __init_end - __init_begin;
+ unsigned long len = init_diff(__init_begin, __init_end);
uint32_t insn;
unsigned int i, nr = len / sizeof(insn);
uint32_t *p;
set_pte_flags_on_range(__init_begin, len, mg_clear);
init_domheap_pages(pa, pa + len);
- printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
+ printk("Freed %ldkB init memory.\n",
+ init_diff(__init_begin, __init_end) >> 10);
}
void arch_dump_shared_mem_info(void)
unsigned long __per_cpu_offset[NR_CPUS];
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER (get_order_from_bytes(per_cpu_diff(__per_cpu_start, \
+ __per_cpu_data_end)))
void __init percpu_init_areas(void)
{
static int init_percpu_area(unsigned int cpu)
{
- char *p;
+ per_cpu_t *p;
if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
return -EBUSY;
if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
return -ENOMEM;
- memset(p, 0, __per_cpu_data_end - __per_cpu_start);
- __per_cpu_offset[cpu] = p - __per_cpu_start;
+ memset(p, 0, per_cpu_diff(__per_cpu_start, __per_cpu_data_end));
+ __per_cpu_offset[cpu] = per_cpu_diff(__per_cpu_start, p);
return 0;
}
{
struct free_info *info = container_of(head, struct free_info, rcu);
unsigned int cpu = info->cpu;
- char *p = __per_cpu_start + __per_cpu_offset[cpu];
+ char *p = (char *)__per_cpu_start + __per_cpu_offset[cpu];
free_xenheap_pages(p, PERCPU_ORDER);
__per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}
#include <xen/init.h>
#include <asm/psci.h>
-extern const struct platform_desc _splatform[], _eplatform[];
+typedef struct platform_desc platform_desc_t;
+DECLARE_BOUNDS(platform_desc, _splatform, _eplatform);
/* Pointer to the current platform description */
static const struct platform_desc *platform;
ASSERT(platform == NULL);
/* Looking for the platform description */
- for ( platform = _splatform; platform != _eplatform; platform++ )
+ for ( platform = _splatform;
+ platform_desc_diff(platform, _eplatform) != 0;
+ platform++ )
{
if ( platform_is_compatible(platform) )
break;
}
/* We don't have specific operations for this platform */
- if ( platform == _eplatform )
+ if ( platform_desc_diff(platform, _eplatform) == 0 )
{
/* TODO: dump DT machine compatible node */
printk(XENLOG_INFO "Platform: Generic System\n");
#ifndef __ASSEMBLY__
+#include <xen/lib.h>
#include <xen/types.h>
#include <asm/sysregs.h>
-extern char __per_cpu_start[], __per_cpu_data_end[];
+typedef char per_cpu_t;
+DECLARE_BOUNDS(per_cpu, __per_cpu_start, __per_cpu_data_end);
extern unsigned long __per_cpu_offset[NR_CPUS];
void percpu_init_areas(void);
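Not introduced by this patch, but useful context for __per_cpu_offset[]: init_percpu_area() stores, per CPU, the distance from __per_cpu_start to that CPU's freshly allocated data block, and per-CPU accessors then apply that distance as a byte offset to a variable's link-time address, as the free path above does in reverse. A minimal sketch with a hypothetical accessor name; Xen's real per_cpu() machinery differs in detail.

/* Hypothetical helper: resolve CPU @cpu's copy of a per-CPU variable by
 * adding the byte offset recorded in __per_cpu_offset[]. */
#define demo_per_cpu_ptr(var, cpu) \
    ((typeof(&(var)))((char *)&(var) + __per_cpu_offset[(cpu)]))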