#define active_entry(t, e) \
((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
+/* Technically, we only really need to acquire the lock for SMP
+   guests: the maptrack tables are only ever touched from the
+   context of the guest that owns them, so on a uniprocessor guest
+   the lock can never be contended and is therefore pointless.
+   Don't bother with that optimisation for now, though, because it
+   is scary and confusing. */
+/* The maptrack lock is top-level: you're not allowed to be holding
+ any other locks when you acquire it. */
+static void
+maptrack_lock(struct grant_table *lgt)
+{
+ spin_lock(&lgt->maptrack_lock);
+}
+
+static void
+maptrack_unlock(struct grant_table *lgt)
+{
+ spin_unlock(&lgt->maptrack_lock);
+}
+
static inline int
__get_maptrack_handle(
struct grant_table *t)
if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
{
- spin_lock(&lgt->lock);
+ nr_frames = nr_maptrack_frames(lgt);
+ if ( nr_frames >= max_nr_maptrack_frames() )
+ return -1;
- if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
- {
- nr_frames = nr_maptrack_frames(lgt);
- if ( nr_frames >= max_nr_maptrack_frames() )
- {
- spin_unlock(&lgt->lock);
- return -1;
- }
+ new_mt = alloc_xenheap_page();
+ if ( new_mt == NULL )
+ return -1;
- new_mt = alloc_xenheap_page();
- if ( new_mt == NULL )
- {
- spin_unlock(&lgt->lock);
- return -1;
- }
-
- clear_page(new_mt);
-
- new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
-
- for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
- {
- new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
- new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
- }
+ clear_page(new_mt);
- lgt->maptrack[nr_frames] = new_mt;
- lgt->maptrack_limit = new_mt_limit;
+ new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
- gdprintk(XENLOG_INFO,
- "Increased maptrack size to %u frames.\n", nr_frames + 1);
- handle = __get_maptrack_handle(lgt);
+ for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
+ {
+ new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
+ new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
}
- spin_unlock(&lgt->lock);
+ lgt->maptrack[nr_frames] = new_mt;
+ lgt->maptrack_limit = new_mt_limit;
+
+ gdprintk(XENLOG_INFO,
+ "Increased maptrack size to %u frames.\n", nr_frames + 1);
+ handle = __get_maptrack_handle(lgt);
}
return handle;
}
guest_handle_cast(uop, gnttab_map_grant_ref_t);
if ( unlikely(!guest_handle_okay(map, count)) )
goto out;
+ maptrack_lock(current->domain->grant_table);
rc = gnttab_map_grant_ref(map, count);
+ maptrack_unlock(current->domain->grant_table);
break;
}
case GNTTABOP_unmap_grant_ref:
guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
if ( unlikely(!guest_handle_okay(unmap, count)) )
goto out;
+ maptrack_lock(current->domain->grant_table);
rc = gnttab_unmap_grant_ref(unmap, count);
+ maptrack_unlock(current->domain->grant_table);
break;
}
case GNTTABOP_unmap_and_replace:
rc = -ENOSYS;
if ( unlikely(!replace_grant_supported()) )
goto out;
+ maptrack_lock(current->domain->grant_table);
rc = gnttab_unmap_and_replace(unmap, count);
+ maptrack_unlock(current->domain->grant_table);
break;
}
case GNTTABOP_setup_table:
/* Simple stuff. */
memset(t, 0, sizeof(*t));
spin_lock_init(&t->lock);
+ spin_lock_init(&t->maptrack_lock);
t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
/* Active grant table. */
for ( handle = 0; handle < gt->maptrack_limit; handle++ )
{
+        /* The domain is dying, so we don't need the maptrack lock. */
map = &maptrack_entry(gt, handle);
if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
continue;