struct vcpu *v;
struct mapcache_domain *dcache;
struct mapcache_vcpu *vcache;
+ struct mapcache *cache;
struct vcpu_maphash_entry *hashent;
+ struct domain *d;
#ifdef NDEBUG
    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
        return mfn_to_virt(mfn_x(mfn));
#endif

    v = mapcache_current_vcpu();
    if ( !v || !is_pv_vcpu(v) )
        return mfn_to_virt(mfn_x(mfn));
- dcache = &v->domain->arch.pv.mapcache;
+ d = v->domain;
+ dcache = &d->arch.pv.mapcache;
vcache = &v->arch.pv.mapcache;
- if ( !dcache->inuse )
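+ /* Use the per-vCPU mapcache when ASI is enabled, the shared per-domain one otherwise. */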
+ cache = d->arch.asi ? &v->arch.pv.mapcache.cache
+                     : &dcache->cache;
+ if ( !cache->inuse )
return mfn_to_virt(mfn_x(mfn));
perfc_incr(map_domain_page_count);
    local_irq_save(flags);

    hashent = &vcache->hash[MAPHASH_HASHFN(mfn_x(mfn))];
    if ( hashent->mfn == mfn_x(mfn) )
{
idx = hashent->idx;
- ASSERT(idx < dcache->entries);
+ ASSERT(idx < cache->entries);
hashent->refcnt++;
ASSERT(hashent->refcnt);
ASSERT(mfn_eq(l1e_get_mfn(MAPCACHE_L1ENT(idx)), mfn));
goto out;
}
- spin_lock(&dcache->lock);
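+ /*
+  * A per-vCPU mapcache is only used by its owning (current) vCPU, so the
+  * per-domain lock is only needed when the cache is shared (no ASI).
+  */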
+ if ( !d->arch.asi )
+ spin_lock(&dcache->lock);
/* Has some other CPU caused a wrap? We must flush if so. */
- if ( unlikely(dcache->epoch != vcache->shadow_epoch) )
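+ /* Epoch/wrap synchronisation only matters when the mapcache is shared between vCPUs. */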
+ if ( !d->arch.asi && unlikely(dcache->epoch != vcache->shadow_epoch) )
{
vcache->shadow_epoch = dcache->epoch;
        if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
        {
            perfc_incr(domain_page_tlb_flush);
            flush_tlb_local();
}
}
- idx = find_next_zero_bit(dcache->inuse, dcache->entries, dcache->cursor);
- if ( unlikely(idx >= dcache->entries) )
+ idx = find_next_zero_bit(cache->inuse, cache->entries, cache->cursor);
+ if ( unlikely(idx >= cache->entries) )
{
unsigned long accum = 0, prev = 0;
/* /First/, clean the garbage map and update the inuse list. */
- for ( i = 0; i < BITS_TO_LONGS(dcache->entries); i++ )
+ for ( i = 0; i < BITS_TO_LONGS(cache->entries); i++ )
{
accum |= prev;
- dcache->inuse[i] &= ~xchg(&dcache->garbage[i], 0);
- prev = ~dcache->inuse[i];
+ cache->inuse[i] &= ~xchg(&cache->garbage[i], 0);
+ prev = ~cache->inuse[i];
}
- if ( accum | (prev & BITMAP_LAST_WORD_MASK(dcache->entries)) )
- idx = find_first_zero_bit(dcache->inuse, dcache->entries);
+ if ( accum | (prev & BITMAP_LAST_WORD_MASK(cache->entries)) )
+ idx = find_first_zero_bit(cache->inuse, cache->entries);
else
{
/* Replace a hash entry instead. */
            i = MAPHASH_HASHFN(mfn_x(mfn));
            do {
                hashent = &vcache->hash[i];
                if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
                {
                    idx = hashent->idx;
                    ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == hashent->mfn);
                    l1e_write(&MAPCACHE_L1ENT(idx), l1e_empty());
                    hashent->idx = MAPHASHENT_NOTINUSE;
                    hashent->mfn = ~0UL;
                    break;
                }

                if ( ++i == MAPHASH_ENTRIES )
                    i = 0;
} while ( i != MAPHASH_HASHFN(mfn_x(mfn)) );
}
- BUG_ON(idx >= dcache->entries);
+ BUG_ON(idx >= cache->entries);
/* /Second/, flush TLBs. */
perfc_incr(domain_page_tlb_flush);
flush_tlb_local();
- vcache->shadow_epoch = ++dcache->epoch;
- dcache->tlbflush_timestamp = tlbflush_current_time();
+ if ( !d->arch.asi )
+ {
+ vcache->shadow_epoch = ++dcache->epoch;
+ dcache->tlbflush_timestamp = tlbflush_current_time();
+ }
}
- set_bit(idx, dcache->inuse);
- dcache->cursor = idx + 1;
+ set_bit(idx, cache->inuse);
+ cache->cursor = idx + 1;
- spin_unlock(&dcache->lock);
+ if ( !d->arch.asi )
+ spin_unlock(&dcache->lock);
    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));

 out:
    local_irq_restore(flags);
    return (void *)MAPCACHE_VIRT_START + pfn_to_paddr(idx);
}

void unmap_domain_page(const void *ptr)
{
unsigned int idx;
struct vcpu *v;
struct mapcache_domain *dcache;
+ struct mapcache *cache;
unsigned long va = (unsigned long)ptr, mfn, flags;
struct vcpu_maphash_entry *hashent;
    v = mapcache_current_vcpu();
    ASSERT(v && is_pv_vcpu(v));
dcache = &v->domain->arch.pv.mapcache;
- ASSERT(dcache->inuse);
+ cache = v->domain->arch.asi ? &v->arch.pv.mapcache.cache
+                             : &dcache->cache;
+ ASSERT(cache->inuse);
idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
    mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx));
    hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)];

    local_irq_save(flags);
            ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(hashent->idx)) ==
                   hashent->mfn);
l1e_write(&MAPCACHE_L1ENT(hashent->idx), l1e_empty());
/* /Second/, mark as garbage. */
- set_bit(hashent->idx, dcache->garbage);
+ set_bit(hashent->idx, cache->garbage);
}
/* Add newly-freed mapping to the maphash. */
/* /First/, zap the PTE. */
l1e_write(&MAPCACHE_L1ENT(idx), l1e_empty());
/* /Second/, mark as garbage. */
- set_bit(idx, dcache->garbage);
+ set_bit(idx, cache->garbage);
}
    local_irq_restore(flags);
}
void mapcache_domain_init(struct domain *d)
{
struct mapcache_domain *dcache = &d->arch.pv.mapcache;
- unsigned int bitmap_pages;
ASSERT(is_pv_domain(d));
#ifdef NDEBUG
    if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
        return;
#endif
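+ /*
+  * With ASI the mapcache is set up per-vCPU in mapcache_vcpu_init(), so
+  * there is no shared per-domain state to initialise here.
+  */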
+ if ( d->arch.asi )
+ return;
+
BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
- bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
- dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
- dcache->garbage = dcache->inuse +
- (bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
spin_lock_init(&dcache->lock);
}
int mapcache_vcpu_init(struct vcpu *v)
{
struct domain *d = v->domain;
struct mapcache_domain *dcache = &d->arch.pv.mapcache;
+ struct mapcache *cache;
unsigned long i;
- unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
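+ /* A per-vCPU (ASI) mapcache only needs entries for this vCPU, not for every vCPU. */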
+ unsigned int ents = (d->arch.asi ? 1 : d->max_vcpus) *
+ MAPCACHE_VCPU_ENTRIES;
unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
- if ( !is_pv_vcpu(v) || !dcache->inuse )
+ if ( !is_pv_vcpu(v) )
return 0;
- if ( ents > dcache->entries )
+ cache = d->arch.asi ? &v->arch.pv.mapcache.cache
+ : &dcache->cache;
+
+ if ( !cache->inuse )
+ return 0;
+
+ if ( ents > cache->entries )
{
/* Populate page tables. */
int rc = create_perdomain_mapping(v, MAPCACHE_VIRT_START, ents, false);
+ const unsigned int bitmap_pages =
+ PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
+
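+ /*
+  * The inuse/garbage bitmaps live after the guard page that follows
+  * MAPCACHE_VIRT_END; the BUILD_BUG_ON() in mapcache_domain_init()
+  * accounts for this space.
+  */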
+ cache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
+ cache->garbage = cache->inuse +
+ (bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
+
/* Populate bit maps. */
if ( !rc )
- rc = create_perdomain_mapping(v, (unsigned long)dcache->inuse,
+ rc = create_perdomain_mapping(v, (unsigned long)cache->inuse,
nr, true);
if ( !rc )
- rc = create_perdomain_mapping(v, (unsigned long)dcache->garbage,
+ rc = create_perdomain_mapping(v, (unsigned long)cache->garbage,
nr, true);
if ( rc )
return rc;
- dcache->entries = ents;
+ cache->entries = ents;
}
/* Mark all maphash entries as not in use. */