inconsistent grant table state such as current
version, partially initialized active table pages,
etc.
- grant_table->maptrack_lock : spinlock used to protect the maptrack free list
+ grant_table->maptrack_lock : spinlock used to protect the maptrack limit
+ v->maptrack_freelist_lock : spinlock used to protect the maptrack free list
active_grant_entry->lock : spinlock used to serialize modifications to
active entries
The maptrack free list is protected by its own spinlock. The maptrack
lock may be locked while holding the grant table lock.
+ The maptrack_freelist_lock is an innermost lock. It may be locked
+ while holding other locks, but no other locks may be acquired within
+ it.
+
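To make the ordering concrete, here is a minimal sketch of the allowed
nesting. It is not hypervisor code: pthread locks stand in for the grant
table rwlock, maptrack_lock and maptrack_freelist_lock, and the function
name allowed_nesting() is invented for illustration.

    #include <pthread.h>

    /* Stand-ins for the three locks described above (illustrative only). */
    static pthread_rwlock_t grant_table_lock       = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  maptrack_lock          = PTHREAD_MUTEX_INITIALIZER; /* maptrack limit */
    static pthread_mutex_t  maptrack_freelist_lock = PTHREAD_MUTEX_INITIALIZER; /* free list */

    /* Allowed nesting, outermost to innermost; nothing further is
     * acquired while the freelist lock is held. */
    static void allowed_nesting(void)
    {
        pthread_rwlock_rdlock(&grant_table_lock);
        pthread_mutex_lock(&maptrack_lock);
        pthread_mutex_lock(&maptrack_freelist_lock);
        /* ... manipulate the free list only; take no further locks ... */
        pthread_mutex_unlock(&maptrack_freelist_lock);
        pthread_mutex_unlock(&maptrack_lock);
        pthread_rwlock_unlock(&grant_table_lock);
    }

Taking maptrack_freelist_lock on its own, or under only one of the outer
locks, is equally fine; the rule only forbids acquiring any further lock
while it is held.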
Active entries are obtained by calling active_entry_acquire(gt, ref).
This function returns a pointer to the active entry after locking its
spinlock. The caller must hold the grant table read lock before
{
unsigned int head, next, prev_head;
+ spin_lock(&v->maptrack_freelist_lock);
+
do {
/* No maptrack pages allocated for this VCPU yet? */
head = read_atomic(&v->maptrack_head);
if ( unlikely(head == MAPTRACK_TAIL) )
+ {
+ spin_unlock(&v->maptrack_freelist_lock);
return -1;
+ }
/*
* Always keep one entry in the free list to make it easier to
* add free entries to the tail.
*/
next = read_atomic(&maptrack_entry(t, head).ref);
if ( unlikely(next == MAPTRACK_TAIL) )
+ {
+ spin_unlock(&v->maptrack_freelist_lock);
return -1;
+ }
prev_head = head;
head = cmpxchg(&v->maptrack_head, prev_head, next);
} while ( head != prev_head );
+ spin_unlock(&v->maptrack_freelist_lock);
+
return head;
}
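The allocation-side hunk above is the crux of the change on this path: the
free-list pop now runs under the per-vCPU freelist lock, so every return
path, including both error paths, has to drop that lock. Below is a hedged,
self-contained sketch of the same pattern; the names (freelist, fl_entry,
freelist_pop, TAIL) are invented, a pthread mutex stands in for the
spinlock, and C11 atomics stand in for Xen's read_atomic()/cmpxchg().

    #include <pthread.h>
    #include <stdatomic.h>

    #define TAIL (~0u)                         /* stands in for MAPTRACK_TAIL */

    struct fl_entry { _Atomic unsigned int next; };

    struct freelist {
        pthread_mutex_t      lock;             /* stands in for maptrack_freelist_lock */
        _Atomic unsigned int head;
        struct fl_entry     *slot;             /* entry array; 'next' links the free list */
    };

    /* Pop one free index, or return -1 if the list is empty or only the
     * tail sentinel remains. */
    static int freelist_pop(struct freelist *fl)
    {
        unsigned int head, next, prev_head;

        pthread_mutex_lock(&fl->lock);

        do {
            head = atomic_load(&fl->head);
            if (head == TAIL) {                /* nothing allocated yet */
                pthread_mutex_unlock(&fl->lock);
                return -1;                     /* unlock on every exit path */
            }

            next = atomic_load(&fl->slot[head].next);
            if (next == TAIL) {                /* keep one entry as the sentinel */
                pthread_mutex_unlock(&fl->lock);
                return -1;
            }

            prev_head = head;
            /* Claim 'head' by advancing the list head to 'next'. */
        } while (!atomic_compare_exchange_weak(&fl->head, &prev_head, next));

        pthread_mutex_unlock(&fl->lock);
        return (int)head;
    }

The compare-and-exchange loop is kept only to mirror the structure of the
original code; the property the patch enforces is the paired unlock on each
return.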
/* 2. Add entry to the tail of the list on the original VCPU. */
v = currd->vcpu[maptrack_entry(t, handle).vcpu];
+ spin_lock(&v->maptrack_freelist_lock);
+
cur_tail = read_atomic(&v->maptrack_tail);
do {
prev_tail = cur_tail;
cur_tail = cmpxchg(&v->maptrack_tail, prev_tail, handle);
} while ( cur_tail != prev_tail );

/* 3. Update the old tail entry to point to the new entry. */
write_atomic(&maptrack_entry(t, prev_tail).ref, handle);
+
+ spin_unlock(&v->maptrack_freelist_lock);
}
*/
if ( nr_maptrack_frames(lgt) >= max_maptrack_frames )
{
- /*
- * Can drop the lock since no other VCPU can be adding a new
- * frame once they've run out.
- */
spin_unlock(&lgt->maptrack_lock);
/*
* Uninitialized free list? Steal an extra entry for the tail
* sentinel.
*/
if ( curr->maptrack_tail == MAPTRACK_TAIL )
{
handle = steal_maptrack_handle(lgt, curr);
if ( handle == -1 )
return -1;
+ spin_lock(&curr->maptrack_freelist_lock);
+ maptrack_entry(lgt, handle).ref = MAPTRACK_TAIL;
curr->maptrack_tail = handle;
- write_atomic(&curr->maptrack_head, handle);
+ if ( curr->maptrack_head == MAPTRACK_TAIL )
+ write_atomic(&curr->maptrack_head, handle);
+ spin_unlock(&curr->maptrack_freelist_lock);
}
return steal_maptrack_handle(lgt, curr);
}
smp_wmb();
lgt->maptrack_limit += MAPTRACK_PER_PAGE;
+ spin_unlock(&lgt->maptrack_lock);
+ spin_lock(&curr->maptrack_freelist_lock);
+
do {
new_mt[i - 1].ref = read_atomic(&curr->maptrack_head);
head = cmpxchg(&curr->maptrack_head, new_mt[i - 1].ref, handle + 1);
} while ( head != new_mt[i - 1].ref );
- spin_unlock(&lgt->maptrack_lock);
+ spin_unlock(&curr->maptrack_freelist_lock);
return handle;
}
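The final hunk above makes the split visible: lgt->maptrack_lock now covers
only publishing the new maptrack frame and raising maptrack_limit, and it is
released before curr->maptrack_freelist_lock is taken to link the new
entries onto maptrack_head. A compact sketch of that handoff, with an
invented name (grow_and_link) and pthread mutexes standing in for the two
spinlocks:

    #include <pthread.h>

    static pthread_mutex_t maptrack_lock          = PTHREAD_MUTEX_INITIALIZER; /* table-wide: limit */
    static pthread_mutex_t maptrack_freelist_lock = PTHREAD_MUTEX_INITIALIZER; /* per-vCPU: free list */

    /* Publish the new frame under the table-wide lock, then link its
     * entries into the free list under the inner lock alone. */
    static void grow_and_link(void)
    {
        pthread_mutex_lock(&maptrack_lock);
        /* ... install the new frame, write barrier, bump the limit ... */
        pthread_mutex_unlock(&maptrack_lock);

        pthread_mutex_lock(&maptrack_freelist_lock);
        /* ... splice the new entries onto the head of the free list ... */
        pthread_mutex_unlock(&maptrack_freelist_lock);
    }

Dropping the outer lock first is not required by the ordering rule in the
documentation change (the freelist lock may be taken while other locks are
held), but it keeps the table-wide lock's critical section limited to the
frame and limit update.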
void grant_table_init_vcpu(struct vcpu *v)
{
+ spin_lock_init(&v->maptrack_freelist_lock);
v->maptrack_head = MAPTRACK_TAIL;
v->maptrack_tail = MAPTRACK_TAIL;
}