xenbits.xensource.com Git - xen.git/commitdiff
Move cpu hotplug routines into common cpu.c file.
author: Keir Fraser <keir.fraser@citrix.com>
Fri, 14 May 2010 16:07:52 +0000 (17:07 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>
Fri, 14 May 2010 16:07:52 +0000 (17:07 +0100)
Also simplify the locking (reverting to use of spin_trylock, as
returning EBUSY/EAGAIN seems unavoidable after all). In particular
this should continue to ensure that stop_machine_run() does not have
cpu_online_map change under its feet.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
12 files changed:
xen/arch/ia64/xen/xensetup.c
xen/arch/x86/acpi/power.c
xen/arch/x86/platform_hypercall.c
xen/arch/x86/setup.c
xen/arch/x86/smpboot.c
xen/arch/x86/sysctl.c
xen/common/cpu.c
xen/common/spinlock.c
xen/common/stop_machine.c
xen/include/asm-x86/smp.h
xen/include/xen/cpu.h
xen/include/xen/spinlock.h

index a7e4791771a4886b83069ebc3c25036ca0ea9ef6..e3d9d3601153d27d8f3b20f68e89920b5fd58068 100644 (file)
@@ -32,6 +32,7 @@
 #include <xsm/acm/acm_hooks.h>
 #include <asm/sn/simulator.h>
 #include <asm/sal.h>
+#include <xen/cpu.h>
 
 unsigned long total_pages;
 
index 9efae905c2ee31cf0c83cdde787da1ba1ea5c82f..c62f122882903a62d162cee03cfc2267e6647689 100644 (file)
@@ -25,6 +25,7 @@
 #include <xen/domain.h>
 #include <xen/console.h>
 #include <xen/iommu.h>
+#include <xen/cpu.h>
 #include <public/platform.h>
 #include <asm/tboot.h>
 
@@ -138,12 +139,8 @@ static int enter_state(u32 state)
 
     freeze_domains();
 
-    disable_nonboot_cpus();
-    if ( num_online_cpus() != 1 )
-    {
-        error = -EBUSY;
+    if ( (error = disable_nonboot_cpus()) )
         goto enable_cpu;
-    }
 
     cpufreq_del_cpu(0);
 
@@ -207,7 +204,9 @@ static int enter_state(u32 state)
  enable_cpu:
     cpufreq_add_cpu(0);
     microcode_resume_cpu(0);
+    mtrr_aps_sync_begin();
     enable_nonboot_cpus();
+    mtrr_aps_sync_end();
     thaw_domains();
     spin_unlock(&pm_lock);
     return error;
index b3ab4b7446417252606155d9e00a35cebb9e457d..2ab7617a86194c4b7cc4dd97ad41dfabafb56a5b 100644 (file)
@@ -410,7 +410,11 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
 
         g_info = &op->u.pcpu_info;
 
-        spin_lock(&cpu_add_remove_lock);
+        if ( !get_cpu_maps() )
+        {
+            ret = -EBUSY;
+            break;
+        }
 
         if ( (g_info->xen_cpuid >= NR_CPUS) ||
              (g_info->xen_cpuid < 0) ||
@@ -429,7 +433,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
 
         g_info->max_present = last_cpu(cpu_present_map);
 
-        spin_unlock(&cpu_add_remove_lock);
+        put_cpu_maps();
 
         ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
     }
index d6c07f0895141d43ac389cf6aa8bd2f117c5a47c..9d6036f300bb5fd4f81b3f7487ba1433689e8c68 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/bzimage.h> /* for bzimage_headroom */
 #include <asm/mach-generic/mach_apic.h> /* for generic_apic_probe */
 #include <asm/setup.h>
+#include <xen/cpu.h>
 
 #if defined(CONFIG_X86_64)
 #define BOOTSTRAP_DIRECTMAP_END (1UL << 32) /* 4GB */
index 8b857b38097075f847e0821b3a6de18f6ea8f7e0..50ac6619263df3f90e545cf860499b235a00841f 100644 (file)
@@ -46,7 +46,6 @@
 #include <xen/tasklet.h>
 #include <xen/serial.h>
 #include <xen/numa.h>
-#include <xen/event.h>
 #include <xen/cpu.h>
 #include <asm/current.h>
 #include <asm/mc146818rtc.h>
@@ -58,7 +57,6 @@
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
-#include <xen/stop_machine.h>
 #include <acpi/cpufreq/processor_perf.h>
 
 #define setup_trampoline()    (bootsym_phys(trampoline_realmode_entry))
@@ -1310,169 +1308,9 @@ void __cpu_die(unsigned int cpu)
        }
 }
 
-static int take_cpu_down(void *unused)
-{
-       void *hcpu = (void *)(long)smp_processor_id();
-       int rc;
-
-       spin_lock(&cpu_add_remove_lock);
-
-       if (cpu_notifier_call_chain(CPU_DYING, hcpu) != NOTIFY_DONE)
-               BUG();
-
-       rc = __cpu_disable();
-
-       spin_unlock(&cpu_add_remove_lock);
-
-       return rc;
-}
-
-/*
- * Protects against concurrent offline/online requests for a single CPU.
- * We need this extra protection because cpu_down() cannot continuously hold
- * the cpu_add_remove_lock, as it cannot be held across stop_machine_run().
- */
-static cpumask_t cpu_offlining;
-
-int cpu_down(unsigned int cpu)
-{
-       int err, notifier_rc, nr_calls;
-       void *hcpu = (void *)(long)cpu;
-
-       spin_lock(&cpu_add_remove_lock);
-
-       if ((cpu == 0) || !cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
-               spin_unlock(&cpu_add_remove_lock);
-               return -EINVAL;
-       }
-
-       cpu_set(cpu, cpu_offlining);
-
-       printk("Prepare to bring CPU%d down...\n", cpu);
-
-       notifier_rc = __cpu_notifier_call_chain(
-               CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
-       if (notifier_rc != NOTIFY_DONE) {
-               err = notifier_to_errno(notifier_rc);
-               nr_calls--;
-               notifier_rc = __cpu_notifier_call_chain(
-                       CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
-               BUG_ON(notifier_rc != NOTIFY_DONE);
-               goto out;
-       }
-
-       spin_unlock(&cpu_add_remove_lock);
-       err = stop_machine_run(take_cpu_down, NULL, cpu);
-       spin_lock(&cpu_add_remove_lock);
-
-       if (err < 0) {
-               notifier_rc = cpu_notifier_call_chain(CPU_DOWN_FAILED, hcpu);
-               BUG_ON(notifier_rc != NOTIFY_DONE);
-               goto out;
-       }
-
-       __cpu_die(cpu);
-       BUG_ON(cpu_online(cpu));
-
-       notifier_rc = cpu_notifier_call_chain(CPU_DEAD, hcpu);
-       BUG_ON(notifier_rc != NOTIFY_DONE);
-
-out:
-       if (!err) {
-               printk("CPU %u is now offline\n", cpu);
-               send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
-       } else {
-               printk("Failed to take down CPU %u (error %d)\n", cpu, err);
-       }
-       cpu_clear(cpu, cpu_offlining);
-       spin_unlock(&cpu_add_remove_lock);
-       return err;
-}
-
-int cpu_up(unsigned int cpu)
-{
-       int err = 0;
-
-       spin_lock(&cpu_add_remove_lock);
-
-       if (cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       err = __cpu_up(cpu);
-       if (err < 0)
-               goto out;
-
-out:
-       if (!err)
-               send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
-       spin_unlock(&cpu_add_remove_lock);
-       return err;
-}
-
-/* From kernel/power/main.c */
-/* This is protected by pm_sem semaphore */
-static cpumask_t frozen_cpus;
-
-void disable_nonboot_cpus(void)
-{
-       int cpu, error;
-
-       error = 0;
-       cpus_clear(frozen_cpus);
-       printk("Freezing cpus ...\n");
-       for_each_online_cpu(cpu) {
-               if (cpu == 0)
-                       continue;
-               error = cpu_down(cpu);
-               /* No need to check EBUSY here */
-               ASSERT(error != -EBUSY);
-               if (!error) {
-                       cpu_set(cpu, frozen_cpus);
-                       printk("CPU%d is down\n", cpu);
-                       continue;
-               }
-               printk("Error taking cpu %d down: %d\n", cpu, error);
-       }
-       BUG_ON(raw_smp_processor_id() != 0);
-       if (error)
-               panic("cpus not sleeping");
-}
-
-void enable_nonboot_cpus(void)
-{
-       int cpu, error;
-
-       printk("Thawing cpus ...\n");
-       mtrr_aps_sync_begin();
-       for_each_cpu_mask(cpu, frozen_cpus) {
-               error = cpu_up(cpu);
-               /* No conflict will happen here */
-               ASSERT(error != -EBUSY);
-               if (!error) {
-                       printk("CPU%d is up\n", cpu);
-                       continue;
-               }
-               printk("Error taking cpu %d up: %d\n", cpu, error);
-               panic("Not enough cpus");
-       }
-       mtrr_aps_sync_end();
-       cpus_clear(frozen_cpus);
-
-       /*
-        * Cleanup possible dangling ends after sleep...
-        */
-       smpboot_restore_warm_reset_vector();
-}
-
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
 {
-       int cpu = -1;
-
-#ifndef CONFIG_ACPI
-       return -ENOSYS;
-#endif
+       int node, cpu = -1;
 
        dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n",
                apic_id, acpi_id, pxm);
@@ -1480,68 +1318,53 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
        if ( acpi_id > MAX_MADT_ENTRIES || apic_id > MAX_APICS || pxm > 256 )
                return -EINVAL;
 
+       if ( !cpu_hotplug_begin() )
+               return -EBUSY;
+
        /* Detect if the cpu has been added before */
-       if ( x86_acpiid_to_apicid[acpi_id] != 0xff)
+       if ( x86_acpiid_to_apicid[acpi_id] != 0xff )
        {
-               if (x86_acpiid_to_apicid[acpi_id] != apic_id)
-                       return -EINVAL;
-               else
-                       return -EEXIST;
+               cpu = (x86_acpiid_to_apicid[acpi_id] != apic_id)
+                       ? -EINVAL : -EEXIST;
+               goto out;
        }
 
        if ( physid_isset(apic_id, phys_cpu_present_map) )
-               return -EEXIST;
-
-       spin_lock(&cpu_add_remove_lock);
-
-       cpu = mp_register_lapic(apic_id, 1);
-
-       if (cpu < 0)
        {
-               spin_unlock(&cpu_add_remove_lock);
-               return cpu;
+               cpu = -EEXIST;
+               goto out;
        }
 
+       if ( (cpu = mp_register_lapic(apic_id, 1)) < 0 )
+               goto out;
+
        x86_acpiid_to_apicid[acpi_id] = apic_id;
 
        if ( !srat_disabled() )
        {
-               int node;
-
-               node = setup_node(pxm);
-               if (node < 0)
+               if ( (node = setup_node(pxm)) < 0 )
                {
                        dprintk(XENLOG_WARNING,
                                "Setup node failed for pxm %x\n", pxm);
                        x86_acpiid_to_apicid[acpi_id] = 0xff;
                        mp_unregister_lapic(apic_id, cpu);
-                       spin_unlock(&cpu_add_remove_lock);
-                       return node;
+                       cpu = node;
+                       goto out;
                }
                apicid_to_node[apic_id] = node;
        }
 
        srat_detect_node(cpu);
        numa_add_cpu(cpu);
-       spin_unlock(&cpu_add_remove_lock);
        dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu);
+ out:
+       cpu_hotplug_done();
        return cpu;
 }
 
 
 int __devinit __cpu_up(unsigned int cpu)
 {
-       int notifier_rc, ret = 0, nr_calls;
-       void *hcpu = (void *)(long)cpu;
-
-       notifier_rc = __cpu_notifier_call_chain(
-               CPU_UP_PREPARE, hcpu, -1, &nr_calls);
-       if (notifier_rc != NOTIFY_DONE) {
-               ret = notifier_to_errno(notifier_rc);
-               nr_calls--;
-               goto fail;
-       }
-
        /*
         * We do warm boot only on cpus that had booted earlier
         * Otherwise cold boot is all handled from smp_boot_cpus().
@@ -1549,20 +1372,15 @@ int __devinit __cpu_up(unsigned int cpu)
         * when a cpu is taken offline from cpu_exit_clear().
         */
        if (!cpu_isset(cpu, cpu_callin_map)) {
-               ret = __smp_prepare_cpu(cpu);
+               if (__smp_prepare_cpu(cpu))
+                       return -EIO;
                smpboot_restore_warm_reset_vector();
        }
 
-       if (ret) {
-               ret = -EIO;
-               goto fail;
-       }
-
        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-               ret = -EIO;
-               goto fail;
+               return -EIO;
        }
 
        /* Unleash the CPU! */
@@ -1572,15 +1390,7 @@ int __devinit __cpu_up(unsigned int cpu)
                process_pending_softirqs();
        }
 
-       notifier_rc = cpu_notifier_call_chain(CPU_ONLINE, hcpu);
-       BUG_ON(notifier_rc != NOTIFY_DONE);
        return 0;
-
- fail:
-       notifier_rc = __cpu_notifier_call_chain(
-               CPU_UP_CANCELED, hcpu, nr_calls, NULL);
-       BUG_ON(notifier_rc != NOTIFY_DONE);
-       return ret;
 }
 
 
index 4ffc76b145851f99384d90847ca6e64f523519cd..5d22c5acb538661fd9feb618cbea88d0e9786bac 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/processor.h>
 #include <asm/numa.h>
 #include <xen/nodemask.h>
+#include <xen/cpu.h>
 #include <xsm/xsm.h>
 
 #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
index 8a04dd449ea206ba3fe008511de2b2ffc73ee090..82a111de98f72b93433ce01d4c750d66cb95c3c3 100644 (file)
@@ -1,6 +1,9 @@
 #include <xen/config.h>
 #include <xen/cpumask.h>
 #include <xen/cpu.h>
+#include <xen/event.h>
+#include <xen/sched.h>
+#include <xen/stop_machine.h>
 
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -26,35 +29,195 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 #endif
 };
 
-DEFINE_SPINLOCK(cpu_add_remove_lock);
+static DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+bool_t get_cpu_maps(void)
+{
+    return spin_trylock_recursive(&cpu_add_remove_lock);
+}
+
+void put_cpu_maps(void)
+{
+    spin_unlock_recursive(&cpu_add_remove_lock);
+}
+
+bool_t cpu_hotplug_begin(void)
+{
+    return get_cpu_maps();
+}
+
+void cpu_hotplug_done(void)
+{
+    put_cpu_maps();
+}
 
 static RAW_NOTIFIER_HEAD(cpu_chain);
 
 int register_cpu_notifier(struct notifier_block *nb)
 {
     int ret;
-    spin_lock(&cpu_add_remove_lock);
+    if ( !spin_trylock(&cpu_add_remove_lock) )
+        BUG(); /* Should never fail as we are called only during boot. */
     ret = raw_notifier_chain_register(&cpu_chain, nb);
     spin_unlock(&cpu_add_remove_lock);
     return ret;
 }
 
-void unregister_cpu_notifier(struct notifier_block *nb)
+static int take_cpu_down(void *unused)
 {
-    spin_lock(&cpu_add_remove_lock);
-    raw_notifier_chain_unregister(&cpu_chain, nb);
-    spin_unlock(&cpu_add_remove_lock);
+    void *hcpu = (void *)(long)smp_processor_id();
+    if ( raw_notifier_call_chain(&cpu_chain, CPU_DYING, hcpu) != NOTIFY_DONE )
+        BUG();
+    return __cpu_disable();
+}
+
+int cpu_down(unsigned int cpu)
+{
+    int err, notifier_rc, nr_calls;
+    void *hcpu = (void *)(long)cpu;
+
+    if ( !cpu_hotplug_begin() )
+        return -EBUSY;
+
+    if ( (cpu == 0) || !cpu_online(cpu) )
+    {
+        cpu_hotplug_done();
+        return -EINVAL;
+    }
+
+    printk("Prepare to bring CPU%d down...\n", cpu);
+
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+    if ( notifier_rc != NOTIFY_DONE )
+    {
+        err = notifier_to_errno(notifier_rc);
+        nr_calls--;
+        notifier_rc = __raw_notifier_call_chain(
+            &cpu_chain, CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+        BUG_ON(notifier_rc != NOTIFY_DONE);
+        goto out;
+    }
+
+    if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
+    {
+        notifier_rc = raw_notifier_call_chain(
+            &cpu_chain, CPU_DOWN_FAILED, hcpu);
+        BUG_ON(notifier_rc != NOTIFY_DONE);
+        goto out;
+    }
+
+    __cpu_die(cpu);
+    BUG_ON(cpu_online(cpu));
+
+    notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ out:
+    if ( !err )
+    {
+        printk("CPU %u is now offline\n", cpu);
+        send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    }
+    else
+    {
+        printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+    }
+    cpu_hotplug_done();
+    return err;
 }
 
-int cpu_notifier_call_chain(unsigned long val, void *v)
+int cpu_up(unsigned int cpu)
 {
-    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
-    return raw_notifier_call_chain(&cpu_chain, val, v);
+    int notifier_rc, nr_calls, err = 0;
+    void *hcpu = (void *)(long)cpu;
+
+    if ( !cpu_hotplug_begin() )
+        return -EBUSY;
+
+    if ( cpu_online(cpu) || !cpu_present(cpu) )
+    {
+        cpu_hotplug_done();
+        return -EINVAL;
+    }
+
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+    if ( notifier_rc != NOTIFY_DONE )
+    {
+        err = notifier_to_errno(notifier_rc);
+        nr_calls--;
+        goto fail;
+    }
+
+    err = __cpu_up(cpu);
+    if ( err < 0 )
+        goto fail;
+
+    notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+
+    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+
+    cpu_hotplug_done();
+    return 0;
+
+ fail:
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+    cpu_hotplug_done();
+    return err;
 }
 
-int __cpu_notifier_call_chain(
-    unsigned long val, void *v, int nr_to_call, int *nr_calls)
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
 {
-    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
-    return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
+    int cpu, error = 0;
+
+    BUG_ON(raw_smp_processor_id() != 0);
+
+    cpus_clear(frozen_cpus);
+
+    printk("Disabling non-boot CPUs ...\n");
+
+    for_each_online_cpu ( cpu )
+    {
+        if ( cpu == 0 )
+            continue;
+
+        if ( (error = cpu_down(cpu)) )
+        {
+            BUG_ON(error == -EBUSY);
+            printk("Error taking CPU%d down: %d\n", cpu, error);
+            break;
+        }
+
+        cpu_set(cpu, frozen_cpus);
+        printk("CPU%d is down\n", cpu);
+    }
+
+    BUG_ON(!error && (num_online_cpus() != 1));
+    return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+    int cpu, error;
+
+    printk("Enabling non-boot CPUs  ...\n");
+
+    for_each_cpu_mask ( cpu, frozen_cpus )
+    {
+        if ( (error = cpu_up(cpu)) )
+        {
+            BUG_ON(error == -EBUSY);
+            printk("Error taking CPU%d up: %d\n", cpu, error);
+            continue;
+        }
+        printk("CPU%d is up\n", cpu);
+    }
+
+    cpus_clear(frozen_cpus);
 }
index caca8d5c8787742ff13c9ad0bff32fe590ecfd8e..b3d4b3ba91a960a890d6af69884be4da02f4bcb5 100644 (file)
@@ -186,7 +186,7 @@ void _spin_barrier_irq(spinlock_t *lock)
     local_irq_restore(flags);
 }
 
-void _spin_lock_recursive(spinlock_t *lock)
+int _spin_trylock_recursive(spinlock_t *lock)
 {
     int cpu = smp_processor_id();
 
@@ -197,13 +197,22 @@ void _spin_lock_recursive(spinlock_t *lock)
 
     if ( likely(lock->recurse_cpu != cpu) )
     {
-        spin_lock(lock);
+        if ( !spin_trylock(lock) )
+            return 0;
         lock->recurse_cpu = cpu;
     }
 
     /* We support only fairly shallow recursion, else the counter overflows. */
     ASSERT(lock->recurse_cnt < 0xfu);
     lock->recurse_cnt++;
+
+    return 1;
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+    while ( !spin_trylock_recursive(lock) )
+        cpu_relax();
 }
 
 void _spin_unlock_recursive(spinlock_t *lock)
index 9f5dd1e79917b52353308ff85c988677d3b01472..70856505e3a4281e35c6ae56da0958c772bc5574 100644 (file)
@@ -28,6 +28,7 @@
 #include <xen/stop_machine.h>
 #include <xen/errno.h>
 #include <xen/smp.h>
+#include <xen/cpu.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 
@@ -72,19 +73,20 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
     BUG_ON(!local_irq_is_enabled());
 
+    /* cpu_online_map must not change. */
+    if ( !get_cpu_maps() )
+        return -EBUSY;
+
     allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
     nr_cpus = cpus_weight(allbutself);
 
-    if ( nr_cpus == 0 )
-    {
-        BUG_ON(cpu != smp_processor_id());
-        return (*fn)(data);
-    }
-
     /* Must not spin here as the holder will expect us to be descheduled. */
     if ( !spin_trylock(&stopmachine_lock) )
+    {
+        put_cpu_maps();
         return -EBUSY;
+    }
 
     stopmachine_data.fn = fn;
     stopmachine_data.fn_data = data;
@@ -113,13 +115,17 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
     spin_unlock(&stopmachine_lock);
 
+    put_cpu_maps();
+
     return ret;
 }
 
-static void stopmachine_action(unsigned long unused)
+static void stopmachine_action(unsigned long cpu)
 {
     enum stopmachine_state state = STOPMACHINE_START;
 
+    BUG_ON(cpu != smp_processor_id());
+
     smp_mb();
 
     while ( state != STOPMACHINE_EXIT )
index d67165493af274490620ea8cc759bdf376697a41..3c85b8795615aa4a10aa1e4d33889e68904b0986 100644 (file)
@@ -56,12 +56,8 @@ extern u32 cpu_2_logical_apicid[];
 DECLARE_PER_CPU(int, cpu_state);
 
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-extern int cpu_down(unsigned int cpu);
-extern int cpu_up(unsigned int cpu);
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
-extern void disable_nonboot_cpus(void);
-extern void enable_nonboot_cpus(void);
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
 
 /*
index 521559e6bb1b55a7c12220faf7b9c59f8a486249..115dec7896ba674f2ff8c75e65727aba66556af8 100644 (file)
@@ -5,13 +5,16 @@
 #include <xen/spinlock.h>
 #include <xen/notifier.h>
 
-extern spinlock_t cpu_add_remove_lock;
+/* Safely access cpu_online_map, cpu_present_map, etc. */
+bool_t get_cpu_maps(void);
+void put_cpu_maps(void);
 
+/* Safely perform CPU hotplug and update cpu_online_map, etc. */
+bool_t cpu_hotplug_begin(void);
+void cpu_hotplug_done(void);
+
+/* Receive notification of CPU hotplug events. */
 int register_cpu_notifier(struct notifier_block *nb);
-void unregister_cpu_notifier(struct notifier_block *nb);
-int cpu_notifier_call_chain(unsigned long val, void *v);
-int __cpu_notifier_call_chain(
-    unsigned long val, void *v, int nr_to_call, int *nr_calls);
 
 /*
  * Notification actions: note that only CPU_{UP,DOWN}_PREPARE may fail ---
@@ -25,4 +28,12 @@ int __cpu_notifier_call_chain(
 #define CPU_DYING        0x0007 /* CPU is nearly dead (in stop_machine ctxt) */
 #define CPU_DEAD         0x0008 /* CPU is dead */
 
+/* Perform CPU hotplug. May return -EAGAIN. */
+int cpu_down(unsigned int cpu);
+int cpu_up(unsigned int cpu);
+
+/* Power management. */
+int disable_nonboot_cpus(void);
+void enable_nonboot_cpus(void);
+
 #endif /* __XEN_CPU_H__ */
index e1f500c4ec63eb641a675e241964f9c382e90628..f6f737d7565207772305a020c0fb81e2e2b99055 100644 (file)
@@ -146,6 +146,7 @@ int _spin_trylock(spinlock_t *lock);
 void _spin_barrier(spinlock_t *lock);
 void _spin_barrier_irq(spinlock_t *lock);
 
+int _spin_trylock_recursive(spinlock_t *lock);
 void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);
 
@@ -191,6 +192,7 @@ int _rw_is_write_locked(rwlock_t *lock);
  * are any critical regions that cannot form part of such a set, they can use
  * standard spin_[un]lock().
  */
+#define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
 #define spin_lock_recursive(l)        _spin_lock_recursive(l)
 #define spin_unlock_recursive(l)      _spin_unlock_recursive(l)