#define _active_entry(t, e) \
((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
+DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
+
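The hunks below call grant_read_lock()/grant_write_lock() wrappers whose definitions are not part of this excerpt. Assuming they follow the usual percpu-rwlock pattern, they are presumably thin inline helpers in grant_table.h along these lines (a sketch under that assumption, not the verbatim patch):

/* Likely shape of the wrappers used throughout this patch: the percpu_*
 * primitives pair the global per-CPU owner variable (grant_rwlock) with
 * the percpu_rwlock_t embedded in each grant table. */
static inline void grant_read_lock(struct grant_table *gt)
{
    percpu_read_lock(grant_rwlock, &gt->lock);
}

static inline void grant_read_unlock(struct grant_table *gt)
{
    percpu_read_unlock(grant_rwlock, &gt->lock);
}

static inline void grant_write_lock(struct grant_table *gt)
{
    percpu_write_lock(grant_rwlock, &gt->lock);
}

static inline void grant_write_unlock(struct grant_table *gt)
{
    percpu_write_unlock(grant_rwlock, &gt->lock);
}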
static inline void gnttab_flush_tlb(const struct domain *d)
{
if ( !paging_mode_external(d) )
{
struct active_grant_entry *act;
- ASSERT(rw_is_locked(&t->lock));
+ /*
+ * The grant table for the active entry should be locked, but the
+ * percpu rwlock cannot be checked for a read lock without race
+ * conditions or high overhead, so we cannot use an ASSERT:
+ *
+ * ASSERT(rw_is_locked(&t->lock));
+ */
act = &_active_entry(t, e);
spin_lock(&act->lock);
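The comment above replaces the old rw_is_locked() ASSERT: a percpu rwlock reader's fast path records itself only in its own CPU's grant_rwlock slot, so there is no shared word an ASSERT could test. A hypothetical check, shown only to illustrate the cost and the race (this helper is not part of the patch):

/*
 * Hypothetical, for illustration only: the sole record of a fast-path
 * read lock is each CPU's private grant_rwlock pointer, so any check
 * must scan every CPU and can race with readers entering or leaving
 * on other CPUs.
 */
static bool grant_rw_is_read_locked(struct grant_table *gt)
{
    unsigned int cpu;

    for_each_online_cpu ( cpu )
        if ( per_cpu(grant_rwlock, cpu) == &gt->lock )  /* racy snapshot */
            return true;

    return false;  /* may already be stale by the time the caller looks */
}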
if ( lgt < rgt )
{
- write_lock(&lgt->lock);
- write_lock(&rgt->lock);
+ grant_write_lock(lgt);
+ grant_write_lock(rgt);
}
else
{
if ( lgt != rgt )
- write_lock(&rgt->lock);
- write_lock(&lgt->lock);
+ grant_write_lock(rgt);
+ grant_write_lock(lgt);
}
}
static inline void
double_gt_unlock(struct grant_table *lgt, struct grant_table *rgt)
{
- write_unlock(&lgt->lock);
+ grant_write_unlock(lgt);
if ( lgt != rgt )
- write_unlock(&rgt->lock);
+ grant_write_unlock(rgt);
}
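double_gt_lock() takes the two write locks in a fixed order (lower table address first) so that two CPUs locking the same pair of tables from opposite directions cannot deadlock; the unlock order does not matter. An illustrative caller, with names assumed rather than taken from this hunk:

/*
 * Illustrative only: ld/rd stand for the local and remote domains of an
 * operation that must update both grant tables.  Because every path
 * orders the write locks by address, an ABBA deadlock between two CPUs
 * working on the same pair of domains in opposite directions is
 * impossible.
 */
double_gt_lock(ld->grant_table, rd->grant_table);
/* ... touch maptrack/active entries on both sides ... */
double_gt_unlock(ld->grant_table, rd->grant_table);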
static inline int
{
unsigned int ref, max_iter;
- ASSERT(rw_is_locked(&rgt->lock));
+ /*
+ * The remote grant table should be locked, but the percpu rwlock
+ * cannot be checked for a read lock without race conditions or high
+ * overhead, so we cannot use an ASSERT:
+ *
+ * ASSERT(rw_is_locked(&rgt->lock));
+ */
max_iter = min(*ref_count + (1 << GNTTABOP_CONTINUATION_ARG_SHIFT),
nr_grant_entries(rgt));
* Must have the local domain's grant table write lock when
* iterating over its maptrack entries.
*/
- ASSERT(rw_is_write_locked(&lgt->lock));
+ ASSERT(percpu_rw_is_write_locked(&lgt->lock));
/*
* Must have the remote domain's grant table write lock while
* counting its active entries.
*/
- ASSERT(rw_is_write_locked(&rd->grant_table->lock));
+ ASSERT(percpu_rw_is_write_locked(&rd->grant_table->lock));
for ( handle = 0; !(kind & MAPKIND_WRITE) &&
handle < lgt->maptrack_limit; handle++ )
}
rgt = rd->grant_table;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
/* Bounds check on the grant ref */
if ( unlikely(op->ref >= nr_grant_entries(rgt)))
cache_flags = (shah->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
/* pg may be set, with a refcount included, from __get_paged_frame */
if ( !pg )
put_page(pg);
}
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
act = active_entry_acquire(rgt, op->ref);
active_entry_release(act);
unlock_out:
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
op->status = rc;
put_maptrack_handle(lgt, handle);
rcu_unlock_domain(rd);
op->map = &maptrack_entry(lgt, op->handle);
- read_lock(&lgt->lock);
+ grant_read_lock(lgt);
if ( unlikely(!read_atomic(&op->map->flags)) )
{
- read_unlock(&lgt->lock);
+ grant_read_unlock(lgt);
gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
op->status = GNTST_bad_handle;
return;
}
dom = op->map->domid;
- read_unlock(&lgt->lock);
+ grant_read_unlock(lgt);
if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
{
rgt = rd->grant_table;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
op->flags = read_atomic(&op->map->flags);
if ( unlikely(!op->flags) || unlikely(op->map->domid != dom) )
act_release_out:
active_entry_release(act);
unmap_out:
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
if ( rc == GNTST_okay && gnttab_need_iommu_mapping(ld) )
{
rcu_lock_domain(rd);
rgt = rd->grant_table;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
if ( rgt->gt_version == 0 )
goto unlock_out;
act_release_out:
active_entry_release(act);
unlock_out:
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
if ( put_handle )
{
}
gt = d->grant_table;
- write_lock(&gt->lock);
+ grant_write_lock(gt);
if ( gt->gt_version == 0 )
gt->gt_version = 1;
}
out3:
- write_unlock(&gt->lock);
+ grant_write_unlock(gt);
out2:
rcu_unlock_domain(d);
out1:
goto query_out_unlock;
}
- read_lock(&d->grant_table->lock);
+ grant_read_lock(d->grant_table);
op.nr_frames = nr_grant_frames(d->grant_table);
op.max_nr_frames = max_grant_frames;
op.status = GNTST_okay;
- read_unlock(&d->grant_table->lock);
+ grant_read_unlock(d->grant_table);
query_out_unlock:
union grant_combo scombo, prev_scombo, new_scombo;
int retries = 0;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
if ( unlikely(ref >= nr_grant_entries(rgt)) )
{
scombo = prev_scombo;
}
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
return 1;
fail:
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
return 0;
}
TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
/* Tell the guest about its new page frame. */
- read_lock(&e->grant_table->lock);
+ grant_read_lock(e->grant_table);
act = active_entry_acquire(e->grant_table, gop.ref);
if ( e->grant_table->gt_version == 1 )
GTF_transfer_completed;
active_entry_release(act);
- read_unlock(&e->grant_table->lock);
+ grant_read_unlock(e->grant_table);
rcu_unlock_domain(e);
released_read = 0;
released_write = 0;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
act = active_entry_acquire(rgt, gref);
sha = shared_entry_header(rgt, gref);
}
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
if ( td != rd )
{
*page = NULL;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
if ( unlikely(gref >= nr_grant_entries(rgt)) )
PIN_FAIL(gt_unlock_out, GNTST_bad_gntref,
* here and reacquire
*/
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
rc = __acquire_grant_for_copy(td, trans_gref, rd->domain_id,
readonly, &grant_frame, page,
&trans_page_off, &trans_length, 0);
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
act = active_entry_acquire(rgt, gref);
if ( rc != GNTST_okay ) {
__fixup_status_for_copy_pin(act, status);
rcu_unlock_domain(td);
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
return rc;
}
__fixup_status_for_copy_pin(act, status);
rcu_unlock_domain(td);
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
put_page(*page);
return __acquire_grant_for_copy(rd, gref, ldom, readonly,
frame, page, page_off, length,
*frame = act->frame;
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
return rc;
unlock_out_clear:
active_entry_release(act);
gt_unlock_out:
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
return rc;
}
if ( gt->gt_version == op.version )
goto out;
- write_lock(&gt->lock);
+ grant_write_lock(gt);
/*
* Make sure that the grant table isn't currently in use when we
* change the version number, except for the first 8 entries which
gt->gt_version = op.version;
out_unlock:
- write_unlock(&gt->lock);
+ grant_write_unlock(gt);
out:
op.version = gt->gt_version;
op.status = GNTST_okay;
- read_lock(&gt->lock);
+ grant_read_lock(gt);
for ( i = 0; i < op.nr_frames; i++ )
{
op.status = GNTST_bad_virt_addr;
}
- read_unlock(&gt->lock);
+ grant_read_unlock(gt);
out2:
rcu_unlock_domain(d);
out1:
struct active_grant_entry *act_b = NULL;
s16 rc = GNTST_okay;
- write_lock(&gt->lock);
+ grant_write_lock(gt);
/* Bounds check on the grant refs */
if ( unlikely(ref_a >= nr_grant_entries(d->grant_table)))
active_entry_release(act_b);
if ( act_a != NULL )
active_entry_release(act_a);
- write_unlock(&gt->lock);
+ grant_write_unlock(gt);
rcu_unlock_domain(d);
if ( d != owner )
{
- read_lock(&owner->grant_table->lock);
+ grant_read_lock(owner->grant_table);
ret = grant_map_exists(d, owner->grant_table, mfn, ref_count);
if ( ret != 0 )
{
- read_unlock(&owner->grant_table->lock);
+ grant_read_unlock(owner->grant_table);
rcu_unlock_domain(d);
put_page(page);
return ret;
ret = 0;
if ( d != owner )
- read_unlock(&owner->grant_table->lock);
+ grant_read_unlock(owner->grant_table);
unmap_domain_page(v);
put_page(page);
goto no_mem_0;
/* Simple stuff. */
- rwlock_init(&t->lock);
+ percpu_rwlock_resource_init(&t->lock, grant_rwlock);
spin_lock_init(&t->maptrack_lock);
t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
}
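grant_table_create() ties each table's lock to the grant_rwlock per-CPU owner declared at the top of the patch. The general pattern for a percpu-rwlock-protected resource, using made-up names for illustration, looks roughly like:

/* Illustrative pattern only; my_rwlock and struct my_resource are made up. */
DEFINE_PERCPU_RWLOCK_GLOBAL(my_rwlock);    /* one per-CPU owner slot per class */

struct my_resource {
    percpu_rwlock_t lock;                  /* one lock instance per resource */
};

static void my_resource_init(struct my_resource *r)
{
    /* Associate this instance with the class-wide per-CPU owner variable. */
    percpu_rwlock_resource_init(&r->lock, my_rwlock);
}

static void my_resource_reader(struct my_resource *r)
{
    percpu_read_lock(my_rwlock, &r->lock);
    /* ... read-side critical section ... */
    percpu_read_unlock(my_rwlock, &r->lock);
}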
rgt = rd->grant_table;
- read_lock(&rgt->lock);
+ grant_read_lock(rgt);
act = active_entry_acquire(rgt, ref);
sha = shared_entry_header(rgt, ref);
gnttab_clear_flag(_GTF_reading, status);
active_entry_release(act);
- read_unlock(&rgt->lock);
+ grant_read_unlock(rgt);
rcu_unlock_domain(rd);
#define WARN_GRANT_MAX 10
- read_lock(&gt->lock);
+ grant_read_lock(gt);
for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
{
printk(XENLOG_G_DEBUG "Dom%d has too many (%d) active grants to report\n",
d->domain_id, nr_active);
- read_unlock(&gt->lock);
+ grant_read_unlock(gt);
#undef WARN_GRANT_MAX
}
printk(" -------- active -------- -------- shared --------\n");
printk("[ref] localdom mfn pin localdom gmfn flags\n");
- read_lock(&gt->lock);
+ grant_read_lock(gt);
for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
{
active_entry_release(act);
}
- read_unlock(&gt->lock);
+ grant_read_unlock(gt);
if ( first )
printk("grant-table for remote domain:%5d ... "