int ret = 0;
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
page_list_for_each_safe( page, tmp, list )
{
}
out:
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
return ret;
}
{
bool done = false;
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
for ( i = 0; ; )
{
break;
}
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
if ( !done )
return -ERESTART;
int ret = 0;
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
while ( (page = page_list_remove_head(list)) )
{
page_list_move(list, &d->arch.relmem_list);
out:
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
return ret;
}
int rc = 0;
bool drop_dom_ref = false;
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
if ( d->is_dying )
{
}
out:
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
if ( drop_dom_ref )
put_domain(d);
goto state;
/* need recursive lock because we will free pages */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
page_list_for_each_safe(page, tmp, &d->page_list)
{
shr_handle_t sh;
put_page_alloc_ref(page);
put_page_and_type(page);
}
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
state:
if ( reset_state )
{
if ( !((mm_locked_by_me(l)) && rec) )
_check_lock_level(d, level);
- spin_lock_recursive(&l->lock);
+ rspin_lock(&l->lock);
if ( l->lock.recurse_cnt == 1 )
{
l->locker_function = func;
l->locker_function = "nobody";
_set_lock_level(l->unlock_level);
}
- spin_unlock_recursive(&l->lock);
+ rspin_unlock(&l->lock);
}
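In the mm lock wrappers above, the recurse_cnt checks gate the debug bookkeeping so that locker_function and the lock-level tracking are only updated on the outermost acquisition and the final release; nested acquisitions leave them untouched. A minimal illustrative sketch of that gating, using hypothetical toy_* names and only the recurse_cnt field visible in this hunk, might look like:

/* Illustrative only -- not the Xen mm-locks implementation. */
struct toy_mm_lock {
    rspinlock_t lock;
    const char *locker_function;   /* recorded by the outermost holder only */
};

static void toy_mm_lock_acquire(struct toy_mm_lock *l, const char *func)
{
    rspin_lock(&l->lock);
    if ( l->lock.recurse_cnt == 1 )    /* outermost acquisition */
        l->locker_function = func;
}

static void toy_mm_lock_release(struct toy_mm_lock *l)
{
    if ( l->lock.recurse_cnt == 1 )    /* about to perform the final release */
        l->locker_function = "nobody";
    rspin_unlock(&l->lock);
}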
static inline void mm_enforce_order_unlock(int unlock_level,
unsigned int id;
bool found = false;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
}
}
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return found;
}
return -ENOMEM;
domain_pause(d);
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
if ( id )
*id = i;
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
domain_unpause(d);
return 0;
fail:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
domain_unpause(d);
xfree(s);
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
ASSERT(is_hvm_domain(d));
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
}
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_add_range(r, start, end);
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_remove_range(r, start, end);
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = arch_ioreq_server_map_mem_type(d, s, flags);
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
if ( rc == 0 )
arch_ioreq_server_map_mem_type_completed(d, s, flags);
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
unsigned int id;
int rc;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
goto fail;
}
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return 0;
ioreq_server_remove_vcpu(s, v);
}
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
return rc;
}
struct ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
ioreq_server_remove_vcpu(s, v);
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
}
void ioreq_server_destroy_all(struct domain *d)
if ( !arch_ioreq_server_destroy_all(d) )
return;
- spin_lock_recursive(&d->ioreq_server.lock);
+ rspin_lock(&d->ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
xfree(s);
}
- spin_unlock_recursive(&d->ioreq_server.lock);
+ rspin_unlock(&d->ioreq_server.lock);
}
struct ioreq_server *ioreq_server_select(struct domain *d,
if ( unlikely(is_xen_heap_page(pg)) )
{
/* NB. May recursively lock from relinquish_memory(). */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
for ( i = 0; i < (1 << order); i++ )
arch_free_heap_page(d, &pg[i]);
d->xenheap_pages -= 1 << order;
drop_dom_ref = (d->xenheap_pages == 0);
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
}
else
{
if ( likely(d) && likely(d != dom_cow) )
{
/* NB. May recursively lock from relinquish_memory(). */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
for ( i = 0; i < (1 << order); i++ )
{
drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
/*
* Normally we expect a domain to clear pages before freeing them,
ASSERT_ALLOC_CONTEXT();
/* NB. May recursively lock from relinquish_memory(). */
- spin_lock_recursive(&d->page_alloc_lock);
+ rspin_lock(&d->page_alloc_lock);
arch_free_heap_page(d, page);
/* Add page on the resv_page_list *after* it has been freed. */
page_list_add_tail(page, &d->resv_page_list);
- spin_unlock_recursive(&d->page_alloc_lock);
+ rspin_unlock(&d->page_alloc_lock);
if ( drop_dom_ref )
put_domain(d);
smp_mb();
}
-int _spin_trylock_recursive(spinlock_t *lock)
+bool _rspin_trylock(rspinlock_t *lock)
{
unsigned int cpu = smp_processor_id();
if ( likely(lock->recurse_cpu != cpu) )
{
- if ( !spin_trylock(lock) )
- return 0;
+ if ( !_spin_trylock(lock) )
+ return false;
lock->recurse_cpu = cpu;
}
ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
lock->recurse_cnt++;
- return 1;
+ return true;
}
-void _spin_lock_recursive(spinlock_t *lock)
+void _rspin_lock(rspinlock_t *lock)
{
unsigned int cpu = smp_processor_id();
lock->recurse_cnt++;
}
-void _spin_unlock_recursive(spinlock_t *lock)
+void _rspin_unlock(rspinlock_t *lock)
{
if ( likely(--lock->recurse_cnt == 0) )
{
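For context on the primitives being renamed here: the recursion handling records the owning CPU and a nesting count, so only the first acquisition on a CPU takes the underlying lock and only the last release drops it. The standalone sketch below illustrates that idea with hypothetical toy_* names; it mirrors the recurse_cpu/recurse_cnt fields visible above but is not the Xen rspinlock_t implementation:

/* Illustrative sketch of a recursion-counting spinlock (not Xen code). */
#define TOY_NO_CPU (~0u)

typedef struct {
    spinlock_t lock;            /* underlying non-recursive lock */
    unsigned int recurse_cpu;   /* CPU holding the lock, or TOY_NO_CPU */
    unsigned int recurse_cnt;   /* nesting depth on that CPU */
} toy_rspinlock_t;

static void toy_rspin_lock(toy_rspinlock_t *l)
{
    unsigned int cpu = smp_processor_id();

    if ( l->recurse_cpu != cpu )
    {
        spin_lock(&l->lock);    /* first acquisition on this CPU */
        l->recurse_cpu = cpu;
    }
    l->recurse_cnt++;           /* nested acquisitions only bump the count */
}

static void toy_rspin_unlock(toy_rspinlock_t *l)
{
    if ( --l->recurse_cnt == 0 ) /* outermost release drops the lock */
    {
        l->recurse_cpu = TOY_NO_CPU;
        spin_unlock(&l->lock);
    }
}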
/* console_lock can be acquired recursively from __printk_ratelimit(). */
local_irq_save(flags);
- spin_lock_recursive(&console_lock);
+ rspin_lock(&console_lock);
state = &this_cpu(state);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
state->continued = 1;
}
- spin_unlock_recursive(&console_lock);
+ rspin_unlock(&console_lock);
local_irq_restore(flags);
}
unsigned long flags;
local_irq_save(flags);
- spin_lock_recursive(&console_lock);
+ rspin_lock(&console_lock);
return flags;
}
void console_unlock_recursive_irqrestore(unsigned long flags)
{
- spin_unlock_recursive(&console_lock);
+ rspin_unlock(&console_lock);
local_irq_restore(flags);
}
char lost_str[8];
snprintf(lost_str, sizeof(lost_str), "%d", lost);
/* console_lock may already be acquired by printk(). */
- spin_lock_recursive(&console_lock);
+ rspin_lock(&console_lock);
printk_start_of_line("(XEN) ");
__putstr("printk: ");
__putstr(lost_str);
__putstr(" messages suppressed.\n");
- spin_unlock_recursive(&console_lock);
+ rspin_unlock(&console_lock);
}
local_irq_restore(flags);
return 1;
/* Do not use, as it has no speculation barrier; use pcidevs_lock() instead. */
void pcidevs_lock_unsafe(void)
{
- _spin_lock_recursive(&_pcidevs_lock);
+ _rspin_lock(&_pcidevs_lock);
}
void pcidevs_unlock(void)
{
- spin_unlock_recursive(&_pcidevs_lock);
+ rspin_unlock(&_pcidevs_lock);
}
bool pcidevs_locked(void)
(v) = (v)->next_in_list )
/* Per-domain lock can be recursively acquired in fault handlers. */
-#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
-#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
+#define domain_lock(d) rspin_lock(&(d)->domain_lock)
+#define domain_unlock(d) rspin_unlock(&(d)->domain_lock)
struct evtchn_port_ops;
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);
-int _spin_trylock_recursive(spinlock_t *lock);
-void _spin_lock_recursive(spinlock_t *lock);
-void _spin_unlock_recursive(spinlock_t *lock);
-
static always_inline void spin_lock(spinlock_t *l)
{
_spin_lock(l);
#define spin_barrier(l) _spin_barrier(l)
/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * rspin_[un]lock(): Use these forms when the lock can (safely!) be
* reentered recursively on the same CPU. All critical regions that may form
* part of a recursively-nested set must be protected by these forms. If there
* are any critical regions that cannot form part of such a set, they can use
* standard spin_[un]lock().
*/
-#define spin_trylock_recursive(l) \
- lock_evaluate_nospec(_spin_trylock_recursive(l))
+bool _rspin_trylock(rspinlock_t *lock);
+void _rspin_lock(rspinlock_t *lock);
+void _rspin_unlock(rspinlock_t *lock);
-static always_inline void spin_lock_recursive(spinlock_t *l)
+static always_inline void rspin_lock(rspinlock_t *lock)
{
- _spin_lock_recursive(l);
+ _rspin_lock(lock);
block_lock_speculation();
}
-#define spin_unlock_recursive(l) _spin_unlock_recursive(l)
+#define rspin_trylock(l) lock_evaluate_nospec(_rspin_trylock(l))
+#define rspin_unlock(l) _rspin_unlock(l)
#endif /* __SPINLOCK_H__ */
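As a usage note for the convention spelled out in the comment above: every critical region that can be re-entered on the same CPU must use the rspin_* forms. A hedged sketch, with hypothetical toy_* names modelled on the page_alloc_lock pattern elsewhere in this patch:

/* Hypothetical example only -- names are made up for illustration. */
struct toy_domain {
    rspinlock_t page_alloc_lock;
    /* ... */
};

static void toy_free_page(struct toy_domain *d)
{
    rspin_lock(&d->page_alloc_lock);    /* may nest inside toy_relinquish() */
    /* ... hand the page back to the allocator ... */
    rspin_unlock(&d->page_alloc_lock);
}

static void toy_relinquish(struct toy_domain *d)
{
    rspin_lock(&d->page_alloc_lock);
    /* ... walk the page list; freeing a page re-enters toy_free_page() ... */
    rspin_unlock(&d->page_alloc_lock);
}

Regions that can never be part of such a nested set can keep using the standard spin_[un]lock() forms, as the comment notes.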