xenbits.xensource.com Git - people/julieng/linux-arm.git/commitdiff
arm64/mm: Split asid_inits in 2 parts
author Julien Grall <julien.grall@arm.com>
Thu, 5 Jul 2018 13:57:15 +0000 (14:57 +0100)
committer Julien Grall <julien.grall@arm.com>
Thu, 20 Jun 2019 12:45:08 +0000 (13:45 +0100)
Move out the common initialization of the ASID allocator in a separate
function.

Signed-off-by: Julien Grall <julien.grall@arm.com>
arch/arm64/mm/context.c

index beba8e5b4100e669ad628bae7ab01f01cdb57d8e..81bc3d3654364a1f57c2f20ef203b308514c1f8f 100644 (file)
@@ -271,31 +271,50 @@ asmlinkage void post_ttbr_update_workaround(void)
                        CONFIG_CAVIUM_ERRATUM_27456));
 }
 
-static int asids_init(void)
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of ASIDs available
+ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
+ * allocated contiguously for a given context. This value should be a power of
+ * 2.
+ */
+static int asid_allocator_init(struct asid_info *info,
+                              u32 bits, unsigned int asid_per_ctxt)
 {
-       struct asid_info *info = &asid_info;
-
-       info->bits = get_cpu_asid_bits();
-       info->ctxt_shift = ilog2(ASID_PER_CONTEXT);
+       info->bits = bits;
+       info->ctxt_shift = ilog2(asid_per_ctxt);
        /*
         * Expect allocation after rollover to fail if we don't have at least
-        * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+        * one more ASID than CPUs. ASID #0 is always reserved.
         */
        WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
        atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
        info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
                            sizeof(*info->map), GFP_KERNEL);
        if (!info->map)
-               panic("Failed to allocate bitmap for %lu ASIDs\n",
-                     NUM_CTXT_ASIDS(info));
-
-       info->active = &active_asids;
-       info->reserved = &reserved_asids;
+               return -ENOMEM;
 
        raw_spin_lock_init(&info->lock);
 
+       return 0;
+}
+
+static int asids_init(void)
+{
+       u32 bits = get_cpu_asid_bits();
+
+       if (asid_allocator_init(&asid_info, bits, ASID_PER_CONTEXT))
+               panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+                     1UL << bits);
+
+       asid_info.active = &active_asids;
+       asid_info.reserved = &reserved_asids;
+
        pr_info("ASID allocator initialised with %lu entries\n",
-               NUM_CTXT_ASIDS(info));
+               NUM_CTXT_ASIDS(&asid_info));
+
        return 0;
 }
 early_initcall(asids_init);