return -EINVAL;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Check remapping necessity */
prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
guest_physmap_add_page(d, xatp.gpfn, mfn);
out:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
rcu_unlock_domain(d);
if ( copy_from_guest(&info, arg, 1) )
break;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = map_vcpu_info(v, info.mfn, info.offset);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
break;
}
vc = &v->arch.guest_context;
/* Need to init this vcpu before loading its contents */
- LOCK_BIGLOCK(d);
+ domain_lock(d);
if ( !v->is_initialised )
if ( (rc = boot_vcpu(d, vcpuid, vc)) != 0 )
return rc;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
return -EINVAL;
vcpu_sleep_nosync(v);
/* Any other VCPUs online? ... */
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for_each_vcpu ( d, v )
if ( !test_bit(_VPF_down, &v->pause_flags) )
online_count++;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
/* ... Shut down the domain if not. */
if ( online_count == 0 )
goto out;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for ( i = 0; i < count; i++ )
{
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
perfc_add(num_mmuext_ops, i);
domain_mmap_cache_init(&mapcache);
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for ( i = 0; i < count; i++ )
{
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
domain_mmap_cache_destroy(&mapcache);
l1_pgentry_t ol1e;
struct domain *d = v->domain;
- ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(domain_is_locked(d));
adjust_guest_l1e(nl1e, d);
unsigned long gl1mfn;
int okay;
- ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(domain_is_locked(d));
adjust_guest_l1e(nl1e, d);
if ( rc )
return rc;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
pl1e = guest_map_l1e(v, va, &gl1mfn);
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
switch ( flags & UVMF_FLUSHTYPE_MASK )
{
if ( copy_from_guest(frames, frame_list, nr_pages) )
return -EFAULT;
- LOCK_BIGLOCK(curr->domain);
+ domain_lock(curr->domain);
if ( (ret = set_gdt(curr, frames, entries)) == 0 )
flush_tlb_local();
- UNLOCK_BIGLOCK(curr->domain);
+ domain_unlock(curr->domain);
return ret;
}
return -EINVAL;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Remove previously mapped page if it was present. */
prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
/* Map at new location. */
guest_physmap_add_page(d, xatp.gpfn, mfn);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
rcu_unlock_domain(d);
struct ptwr_emulate_ctxt ptwr_ctxt;
int rc;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Attempt to read the PTE that maps the VA being accessed. */
guest_get_eff_l1e(v, addr, &pte);
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
bail:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
return 0;
}
break;
case 3: /* Write CR3 */
- LOCK_BIGLOCK(v->domain);
+ domain_lock(v->domain);
if ( !is_pv_32on64_vcpu(v) )
rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
#ifdef CONFIG_COMPAT
else
rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
#endif
- UNLOCK_BIGLOCK(v->domain);
+ domain_unlock(v->domain);
if ( rc == 0 ) /* not okay */
goto fail;
break;
guest_handle_add_offset(frame_list, 1);
}
- LOCK_BIGLOCK(current->domain);
+ domain_lock(current->domain);
if ( (ret = set_gdt(current, frames, entries)) == 0 )
flush_tlb_local();
- UNLOCK_BIGLOCK(current->domain);
+ domain_unlock(current->domain);
return ret;
}
break;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EEXIST;
if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, cmp_ctxt);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
xfree(cmp_ctxt);
break;
}
atomic_set(&d->refcnt, 1);
- spin_lock_init(&d->big_lock);
+ spin_lock_init(&d->domain_lock);
spin_lock_init(&d->page_alloc_lock);
spin_lock_init(&d->shutdown_lock);
spin_lock_init(&d->hypercall_deadlock_mutex);
int rc;
domain_pause(d);
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = arch_vcpu_reset(v);
if ( rc != 0 )
    goto out;
clear_bit(_VPF_blocked, &v->pause_flags);
out:
- UNLOCK_BIGLOCK(v->domain);
+ domain_unlock(v->domain);
domain_unpause(d);
return rc;
return -EFAULT;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EEXIST;
if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, ctxt);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
xfree(ctxt);
break;
if ( count > 512 )
return -EINVAL;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EFAULT;
switch ( cmd )
}
out:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
return rc;
}
};
/* Per-domain lock can be recursively acquired in fault handlers. */
-#define LOCK_BIGLOCK(_d) spin_lock_recursive(&(_d)->big_lock)
-#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
+#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
+#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
+#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)
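
For illustration only, a minimal user-space analogue of the renamed interface follows. It is not Xen code: fake_domain, fd_init, fd_lock, fd_unlock, fd_is_locked and fault_handler are invented for this sketch, and a PTHREAD_MUTEX_RECURSIVE mutex stands in for Xen's spin_lock_recursive()/spin_unlock_recursive(). The point is the behaviour the comment above relies on: a nested acquisition by a fault handler does not deadlock, and a callee can assert that its caller already holds the lock, as the ASSERT(domain_is_locked(d)) sites earlier in this patch do.

    /* Build with: cc -o lockdemo lockdemo.c -lpthread
     * Single-threaded demo, so the holder/depth bookkeeping is deliberately
     * kept simple and is not race-free. */
    #define _XOPEN_SOURCE 700
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct fake_domain {
        pthread_mutex_t lock;    /* stands in for spinlock_t domain_lock */
        pthread_t holder;        /* thread currently holding the lock */
        int depth;               /* recursion depth */
    };

    static void fd_init(struct fake_domain *d)
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&d->lock, &attr);
        pthread_mutexattr_destroy(&attr);
        d->depth = 0;
    }

    static void fd_lock(struct fake_domain *d)      /* ~ domain_lock(d) */
    {
        pthread_mutex_lock(&d->lock);
        d->holder = pthread_self();
        d->depth++;
    }

    static void fd_unlock(struct fake_domain *d)    /* ~ domain_unlock(d) */
    {
        d->depth--;
        pthread_mutex_unlock(&d->lock);
    }

    static int fd_is_locked(struct fake_domain *d)  /* ~ domain_is_locked(d) */
    {
        return (d->depth > 0) && pthread_equal(d->holder, pthread_self());
    }

    /* Plays the role of a fault handler that may run while the interrupted
     * path already holds the per-domain lock; it simply takes it again. */
    static void fault_handler(struct fake_domain *d)
    {
        fd_lock(d);
        assert(fd_is_locked(d));   /* mirrors ASSERT(domain_is_locked(d)) */
        fd_unlock(d);
    }

    int main(void)
    {
        struct fake_domain d;
        fd_init(&d);

        fd_lock(&d);               /* outer acquisition, e.g. a hypercall path */
        fault_handler(&d);         /* nested acquisition does not deadlock */
        fd_unlock(&d);

        printf("recursive lock demo: ok\n");
        return 0;
    }

The wrapper names above are arbitrary; in the patch the same three operations are the domain_lock(), domain_unlock() and domain_is_locked() macros defined here, built on the recursive spinlock embedded in struct domain.
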
struct domain
{
shared_info_t *shared_info; /* shared data area */
- spinlock_t big_lock;
+ spinlock_t domain_lock;
spinlock_t page_alloc_lock; /* protects all the following fields */
struct list_head page_list; /* linked list, of size tot_pages */