raw_spinlock_t lock;
/* Which CPU requires context flush on next call */
cpumask_t flush_pending;
+ /* Number of ASIDs allocated per context (shift value) */
+ unsigned int ctxt_shift;
} asid_info;
#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS(info) (ASID_FIRST_VERSION(info) >> 1)
-#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> 1)
-#define idx2asid(info, idx) (((idx) << 1) & ~ASID_MASK(info))
+#define ASID_PER_CONTEXT 2
#else
-#define NUM_USER_ASIDS(info) (ASID_FIRST_VERSION(info))
-#define asid2idx(info, asid) ((asid) & ~ASID_MASK(info))
-#define idx2asid(info, idx) asid2idx(info, idx)
+#define ASID_PER_CONTEXT 1
#endif
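+/*
+ * Each context (mm) owns ASID_PER_CONTEXT hardware ASIDs: with
+ * CONFIG_UNMAP_KERNEL_AT_EL0 a context takes an even/odd pair so that
+ * kernel and user translations can be tagged separately, otherwise it
+ * takes a single ASID. The bitmap tracks one bit per context, and
+ * ctxt_shift converts between bitmap indices and hardware ASIDs, e.g.
+ * with ctxt_shift == 1, idx2asid(info, 1) == 2 and
+ * asid2idx(info, 2) == asid2idx(info, 3) == 1.
+ */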
+#define NUM_CTXT_ASIDS(info) (ASID_FIRST_VERSION(info) >> (info)->ctxt_shift)
+#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
+#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
+
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(info->map, 0, NUM_USER_ASIDS(info));
+ bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
* a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
* pairs.
*/
- asid = find_next_zero_bit(info->map, NUM_USER_ASIDS(info), cur_idx);
- if (asid != NUM_USER_ASIDS(info))
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
+ if (asid != NUM_CTXT_ASIDS(info))
goto set_asid;
/* We're out of ASIDs, so increment the global generation count */
flush_context(info);
/* We have more ASIDs than CPUs, so this will always succeed */
- asid = find_next_zero_bit(info->map, NUM_USER_ASIDS(info), 1);
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
set_asid:
__set_bit(asid, info->map);
struct asid_info *info = &asid_info;
info->bits = get_cpu_asid_bits();
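+ /* ASID_PER_CONTEXT is a power of two, so ilog2() gives the exact shift: 1 with kPTI, 0 without */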
+ info->ctxt_shift = ilog2(ASID_PER_CONTEXT);
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS(info) - 1 <= num_possible_cpus());
+ WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
- info->map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS(info)),
+ info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
sizeof(*info->map), GFP_KERNEL);
if (!info->map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
- NUM_USER_ASIDS(info));
+ NUM_CTXT_ASIDS(info));
info->active = &active_asids;
info->reserved = &reserved_asids;
raw_spin_lock_init(&info->lock);
pr_info("ASID allocator initialised with %lu entries\n",
- NUM_USER_ASIDS(info));
+ NUM_CTXT_ASIDS(info));
return 0;
}
early_initcall(asids_init);
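For reference, here is a minimal user-space sketch of the index/ASID conversion the new macros implement. It assumes 16 ASID bits and kPTI enabled (shift of 1); the names mirror the kernel macros but the snippet is illustrative only and not part of the patch:

	#include <stdio.h>

	#define ASID_BITS		16
	#define ASID_FIRST_VERSION	(1UL << ASID_BITS)
	#define ASID_MASK		(~(ASID_FIRST_VERSION - 1))
	#define CTXT_SHIFT		1	/* kPTI: two ASIDs per context */

	#define NUM_CTXT_ASIDS		(ASID_FIRST_VERSION >> CTXT_SHIFT)
	#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> CTXT_SHIFT)
	#define idx2asid(idx)		(((idx) << CTXT_SHIFT) & ~ASID_MASK)

	int main(void)
	{
		/* Index 0 maps to ASID 0 (reserved for init_mm); index 1 is the first usable slot */
		for (unsigned long idx = 1; idx <= 3; idx++) {
			unsigned long asid = idx2asid(idx);

			printf("idx %lu -> asid %lu -> idx %lu (even/odd pair %lu/%lu)\n",
			       idx, asid, asid2idx(asid), asid, asid | 1);
		}
		printf("usable contexts: %lu\n", NUM_CTXT_ASIDS - 1);
		return 0;
	}

With these assumed values, each bitmap index maps to an even ASID and its odd partner, and the allocator exposes NUM_CTXT_ASIDS - 1 usable contexts, matching the WARN_ON() check in asids_init().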