* Any Xen-heap pages that we will allow to be mapped will have
* their domain field set to dom_xen.
*/
- dom_xen = alloc_domain();
- spin_lock_init(&dom_xen->page_alloc_lock);
- atomic_set(&dom_xen->refcnt, 1);
- dom_xen->domain_id = DOMID_XEN;
+ dom_xen = alloc_domain(DOMID_XEN);
+ BUG_ON(dom_xen == NULL);
/*
* Initialise our DOMID_IO domain.
* This domain owns I/O pages that are within the range of the page_info
* array. Mappings occur at the priv of the caller.
*/
- dom_io = alloc_domain();
- spin_lock_init(&dom_io->page_alloc_lock);
- atomic_set(&dom_io->refcnt, 1);
- dom_io->domain_id = DOMID_IO;
+ dom_io = alloc_domain(DOMID_IO);
+ BUG_ON(dom_io == NULL);
/* First 1MB of RAM is historically marked as I/O. */
for ( i = 0; i < 0x100; i++ )
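
The alloc_domain(DOMID_XEN) and alloc_domain(DOMID_IO) calls above now cover both the initialisation that was open-coded at these call sites and the common setup added to alloc_domain() later in this patch; stitched together, the DOMID_XEN case is roughly:

    if ( (dom_xen = xmalloc(struct domain)) != NULL )
    {
        memset(dom_xen, 0, sizeof(*dom_xen));       /* zeroing is new at this call site */
        dom_xen->domain_id = DOMID_XEN;
        atomic_set(&dom_xen->refcnt, 1);
        spin_lock_init(&dom_xen->big_lock);
        spin_lock_init(&dom_xen->page_alloc_lock);
        INIT_LIST_HEAD(&dom_xen->page_list);
        INIT_LIST_HEAD(&dom_xen->xenpage_list);
    }
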
struct domain *dom0;
-struct domain *domain_create(domid_t dom_id, unsigned int cpu)
+struct domain *domain_create(domid_t domid, unsigned int cpu)
{
struct domain *d, **pd;
struct vcpu *v;
- if ( (d = alloc_domain()) == NULL )
+ if ( (d = alloc_domain(domid)) == NULL )
return NULL;
- d->domain_id = dom_id;
-
- atomic_set(&d->refcnt, 1);
-
- spin_lock_init(&d->big_lock);
- spin_lock_init(&d->page_alloc_lock);
- INIT_LIST_HEAD(&d->page_list);
- INIT_LIST_HEAD(&d->xenpage_list);
-
rangeset_domain_initialise(d);
if ( !is_idle_domain(d) )
if ( !is_idle_domain(d) )
{
write_lock(&domlist_lock);
- pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
+ pd = &domain_list; /* NB. domain_list maintained in order of domid. */
for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
if ( (*pd)->domain_id > d->domain_id )
break;
d->next_in_list = *pd;
*pd = d;
- d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
- domain_hash[DOMAIN_HASH(dom_id)] = d;
+ d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
+ domain_hash[DOMAIN_HASH(domid)] = d;
write_unlock(&domlist_lock);
}
}
}
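
Taken together, the domain_create() hunks above leave the function with roughly this condensed shape (the per-domain setup between allocation and list insertion is unchanged by the patch and elided here; this is a sketch, not the full function):

    struct domain *domain_create(domid_t domid, unsigned int cpu)
    {
        struct domain *d, **pd;

        /* Zeroing, domid, refcount, locks and list heads now happen here. */
        if ( (d = alloc_domain(domid)) == NULL )
            return NULL;

        rangeset_domain_initialise(d);

        /* ... caller-specific setup elided (unchanged by this patch) ... */

        if ( !is_idle_domain(d) )
        {
            write_lock(&domlist_lock);
            /* Keep domain_list sorted by domid; prepend to the hash bucket. */
            for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
                if ( (*pd)->domain_id > d->domain_id )
                    break;
            d->next_in_list = *pd;
            *pd = d;
            d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
            domain_hash[DOMAIN_HASH(domid)] = d;
            write_unlock(&domlist_lock);
        }

        return d;
    }
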
-struct domain *alloc_domain(void)
+struct domain *alloc_domain(domid_t domid)
{
struct domain *d;
- if ( (d = xmalloc(struct domain)) != NULL )
- memset(d, 0, sizeof(*d));
+ if ( (d = xmalloc(struct domain)) == NULL )
+ return NULL;
+
+ memset(d, 0, sizeof(*d));
+ d->domain_id = domid;
+ atomic_set(&d->refcnt, 1);
+ spin_lock_init(&d->big_lock);
+ spin_lock_init(&d->page_alloc_lock);
+ INIT_LIST_HEAD(&d->page_list);
+ INIT_LIST_HEAD(&d->xenpage_list);
return d;
}
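
With the shared initialisation centralised like this, every caller (the dom_xen/dom_io setup and domain_create() above) gets back a zeroed struct domain whose domid, reference count, locks and page lists are already set up. A minimal sketch of the pattern for a hypothetical new caller follows; some_setup() is a placeholder, not part of this patch, and free_domain() is the counterpart declared in the header hunk below:

    struct domain *d;

    if ( (d = alloc_domain(domid)) == NULL )
        return NULL;

    if ( some_setup(d) != 0 )   /* placeholder for caller-specific setup */
    {
        free_domain(d);         /* undo the allocation on failure */
        return NULL;
    }
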
struct vcpu *alloc_vcpu(
struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
-struct domain *alloc_domain(void);
+struct domain *alloc_domain(domid_t domid);
void free_domain(struct domain *d);
#define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
}
extern struct domain *domain_create(
- domid_t dom_id, unsigned int cpu);
+ domid_t domid, unsigned int cpu);
extern int construct_dom0(
struct domain *d,
unsigned long image_start, unsigned long image_len,