d->domain_id = domid;
+ TRACE_1D(TRC_DOM0_DOM_ADD, d->domain_id);
+
lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");
if ( (err = xsm_alloc_security_domain(d)) != 0 )
if ( atomic_cmpxchg(&d->refcnt, 0, DOMAIN_DESTROYED) != 0 )
return;
+ TRACE_1D(TRC_DOM0_DOM_REM, d->domain_id);
+
cpupool_rm_domain(d);
/* Delete from task list and task hashtable. */
- TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
spin_lock(&domlist_update_lock);
pd = &domain_list;
while ( *pd != d )
if ( v->sched_priv == NULL )
return 1;
- TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
-
/* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
if ( is_idle_domain(d) )
{
int sched_init_domain(struct domain *d)
{
SCHED_STAT_CRANK(dom_init);
+ /* Trace point moved here so every domain init is recorded once. */
+ TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
return SCHED_OP(DOM2OP(d), init_domain, d);
}
void sched_destroy_domain(struct domain *d)
{
SCHED_STAT_CRANK(dom_destroy);
+ /* Emit the removal trace before the per-scheduler teardown runs. */
+ TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
SCHED_OP(DOM2OP(d), destroy_domain, d);
}
((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
(_e & TRC_SCHED_EVT_MASK) )
+/* Trace classes for DOM0 operations */
+#define TRC_DOM0_DOMOPS 0x00041000 /* Domain management (creation/destruction) */
+
/* Trace classes for Hardware */
#define TRC_HW_PM 0x00801000 /* Power management traces */
#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
+#define TRC_DOM0_DOM_ADD (TRC_DOM0_DOMOPS + 1) /* domain created */
+#define TRC_DOM0_DOM_REM (TRC_DOM0_DOMOPS + 2) /* domain destroyed */
+
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)