/* Shared state between *_unmap and *_unmap_complete */
u16 flags;
unsigned long frame;
- struct grant_mapping *map;
struct domain *rd;
+ grant_ref_t ref;
};
/* Number of unmap operations that are done between each tlb flush */
struct grant_table *lgt, *rgt;
struct active_grant_entry *act;
s16 rc = 0;
+ struct grant_mapping *map;
+ bool put_handle = false;
ld = current->domain;
lgt = ld->grant_table;
return;
}
- op->map = &maptrack_entry(lgt, op->handle);
+ map = &maptrack_entry(lgt, op->handle);
grant_read_lock(lgt);
- if ( unlikely(!read_atomic(&op->map->flags)) )
+ if ( unlikely(!read_atomic(&map->flags)) )
{
grant_read_unlock(lgt);
gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
return;
}
- dom = op->map->domid;
+ dom = map->domid;
grant_read_unlock(lgt);
if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
grant_read_lock(rgt);
- op->flags = read_atomic(&op->map->flags);
- if ( unlikely(!op->flags) || unlikely(op->map->domid != dom) )
+ op->rd = rd;
+ op->ref = map->ref;
+
+ /*
+ * We can't assume there was no racing unmap for this maptrack entry,
+ * and hence we can't assume map->ref is valid for rd. While the checks
+ * below (with the active entry lock held) will reject any such racing
+ * requests, we still need to make sure we don't attempt to acquire an
+ * invalid lock.
+ */
+ smp_rmb();
+ if ( unlikely(op->ref >= nr_grant_entries(rgt)) )
{
- gdprintk(XENLOG_WARNING, "Unstable handle %u\n", op->handle);
+ gdprintk(XENLOG_WARNING, "Unstable handle %#x\n", op->handle);
rc = GNTST_bad_handle;
- goto unmap_out;
+ goto unlock_out;
}
- op->rd = rd;
- act = active_entry_acquire(rgt, op->map->ref);
+ act = active_entry_acquire(rgt, op->ref);
+
+ /*
+ * Note that we (ab)use the active entry lock here to protect against
+     * multiple unmaps of the same mapping. We don't want to hold lgt's
+ * lock, and we only hold rgt's lock for reading (but the latter wouldn't
+ * be the right one anyway). Hence the easiest is to rely on a lock we
+ * hold anyway; see docs/misc/grant-tables.txt's "Locking" section.
+ */
+
+ op->flags = read_atomic(&map->flags);
+ smp_rmb();
+ if ( unlikely(!op->flags) || unlikely(map->domid != dom) ||
+ unlikely(map->ref != op->ref) )
+ {
+ gdprintk(XENLOG_WARNING, "Unstable handle %u\n", op->handle);
+ rc = GNTST_bad_handle;
+ goto act_release_out;
+ }
if ( op->frame == 0 )
{
"Bad frame number doesn't match gntref. (%lx != %lx)\n",
op->frame, act->frame);
- op->map->flags &= ~GNTMAP_device_map;
+ map->flags &= ~GNTMAP_device_map;
}
if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
op->flags)) < 0 )
goto act_release_out;
- op->map->flags &= ~GNTMAP_host_map;
+ map->flags &= ~GNTMAP_host_map;
+ }
+
+ if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
+ {
+ map->flags = 0;
+ put_handle = true;
}
act_release_out:
active_entry_release(act);
- unmap_out:
+ unlock_out:
grant_read_unlock(rgt);
+ if ( put_handle )
+ put_maptrack_handle(lgt, op->handle);
+
if ( rc == GNTST_okay && gnttab_need_iommu_mapping(ld) )
{
unsigned int kind;
grant_entry_header_t *sha;
struct page_info *pg;
uint16_t *status;
- bool_t put_handle = 0;
if ( rd == NULL )
{
if ( rgt->gt_version == 0 )
goto unlock_out;
- act = active_entry_acquire(rgt, op->map->ref);
- sha = shared_entry_header(rgt, op->map->ref);
+ act = active_entry_acquire(rgt, op->ref);
+ sha = shared_entry_header(rgt, op->ref);
if ( rgt->gt_version == 1 )
status = &sha->flags;
else
- status = &status_entry(rgt, op->map->ref);
+ status = &status_entry(rgt, op->ref);
if ( unlikely(op->frame != act->frame) )
{
act->pin -= GNTPIN_hstw_inc;
}
- if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
- put_handle = 1;
-
if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
!(op->flags & GNTMAP_readonly) )
gnttab_clear_flag(_GTF_writing, status);
unlock_out:
grant_read_unlock(rgt);
- if ( put_handle )
- {
- op->map->flags = 0;
- put_maptrack_handle(ld->grant_table, op->handle);
- }
rcu_unlock_domain(rd);
}