ia64/xen-unstable

changeset 14211:3ac19fda0bc2

linux: Support a new 'fast suspend' mode which does not require us to
hotplug all auxiliary CPUs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Mar 02 12:11:52 2007 +0000 (2007-03-02)
parents bb22c21e1af7
children a69d98bf0c55 7e9dc164b572
files		linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
		linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c
		linux-2.6-xen-sparse/drivers/xen/core/reboot.c
		linux-2.6-xen-sparse/include/xen/cpu_hotplug.h
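
For orientation before reading the hunks below, here is a condensed user-space model of the control flow this changeset introduces. The function names mirror the patched sources (__xen_suspend(), take_machine_down()); every hypervisor, xenbus and SMP call is a stub, and the grant-table, IRQ and locking steps are elided, so this is a sketch of the suspend logic only, not the kernel code itself.

/*
 * Simplified model of the fast/slow suspend paths added by this changeset.
 * Stubs stand in for the real kernel and hypervisor interfaces.
 */
#include <stdio.h>

static int online_cpus = 4;			/* pretend four CPUs are online */

/* --- stubs for the real interfaces used by machine_reboot.c --- */
static int  smp_suspend(void)		{ online_cpus = 1; return 0; }	/* hot-unplug APs */
static void smp_resume(void)		{ online_cpus = 4; }		/* re-plug APs */
static void xenbus_suspend(void)	{ puts("xenbus_suspend"); }
static void xenbus_suspend_cancel(void)	{ puts("xenbus_suspend_cancel"); }
static void xenbus_resume(void)		{ puts("xenbus_resume"); }
static int  hypervisor_suspend(void)	{ return 0; }	/* 0 = resumed in new domain */
static void vcpu_up(int cpu)		{ printf("VCPUOP_up cpu%d\n", cpu); }

static int take_machine_down(int fast_suspend)
{
	int suspend_cancelled, cpu;

	if (!fast_suspend) {
		/* Slow path: retry until only CPU0 remains online. */
		for (;;) {
			if (smp_suspend() != 0)
				return -1;
			xenbus_suspend();
			if (online_cpus == 1)
				break;
			xenbus_suspend_cancel();
		}
	}

	/* mm_pin_all(), gnttab_suspend(), pre_suspend() etc. elided. */
	suspend_cancelled = hypervisor_suspend();

	if (fast_suspend && !suspend_cancelled) {
		/* APs were never unplugged; bring their VCPUs back up here. */
		for (cpu = 1; cpu < online_cpus; cpu++)
			vcpu_up(cpu);
	}

	return suspend_cancelled;
}

static int xen_suspend(int fast_suspend)
{
	int err;

	if (fast_suspend) {
		/* Fast path: quiesce xenbus, then freeze the whole machine
		 * (stop_machine_run() in the real code). */
		xenbus_suspend();
		err = take_machine_down(fast_suspend);
	} else {
		err = take_machine_down(fast_suspend);
	}
	if (err < 0)
		return err;

	if (!err)			/* not cancelled: resume services */
		xenbus_resume();
	else
		xenbus_suspend_cancel();

	if (!fast_suspend)
		smp_resume();
	return 0;
}

int main(void)
{
	/* 1 models the platform-feature-multiprocessor-suspend flag
	 * read from xenstore in reboot.c. */
	return xen_suspend(1);
}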
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Fri Mar 02 12:11:10 2007 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Fri Mar 02 12:11:52 2007 +0000
     1.3 @@ -888,11 +888,67 @@ void unmask_evtchn(int port)
     1.4  }
     1.5  EXPORT_SYMBOL_GPL(unmask_evtchn);
     1.6  
     1.7 +static void restore_cpu_virqs(int cpu)
     1.8 +{
     1.9 +	struct evtchn_bind_virq bind_virq;
    1.10 +	int virq, irq, evtchn;
    1.11 +
    1.12 +	for (virq = 0; virq < NR_VIRQS; virq++) {
    1.13 +		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
    1.14 +			continue;
    1.15 +
    1.16 +		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
    1.17 +
    1.18 +		/* Get a new binding from Xen. */
    1.19 +		bind_virq.virq = virq;
    1.20 +		bind_virq.vcpu = cpu;
    1.21 +		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
    1.22 +						&bind_virq) != 0)
    1.23 +			BUG();
    1.24 +		evtchn = bind_virq.port;
    1.25 +
    1.26 +		/* Record the new mapping. */
    1.27 +		evtchn_to_irq[evtchn] = irq;
    1.28 +		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
    1.29 +		bind_evtchn_to_cpu(evtchn, cpu);
    1.30 +
    1.31 +		/* Ready for use. */
    1.32 +		unmask_evtchn(evtchn);
    1.33 +	}
    1.34 +}
    1.35 +
    1.36 +static void restore_cpu_ipis(int cpu)
    1.37 +{
    1.38 +	struct evtchn_bind_ipi bind_ipi;
    1.39 +	int ipi, irq, evtchn;
    1.40 +
    1.41 +	for (ipi = 0; ipi < NR_IPIS; ipi++) {
    1.42 +		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
    1.43 +			continue;
    1.44 +
    1.45 +		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
    1.46 +
    1.47 +		/* Get a new binding from Xen. */
    1.48 +		bind_ipi.vcpu = cpu;
    1.49 +		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
    1.50 +						&bind_ipi) != 0)
    1.51 +			BUG();
    1.52 +		evtchn = bind_ipi.port;
    1.53 +
    1.54 +		/* Record the new mapping. */
    1.55 +		evtchn_to_irq[evtchn] = irq;
    1.56 +		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
    1.57 +		bind_evtchn_to_cpu(evtchn, cpu);
    1.58 +
    1.59 +		/* Ready for use. */
    1.60 +		unmask_evtchn(evtchn);
    1.61 +
    1.62 +	}
    1.63 +}
    1.64 +
    1.65  void irq_resume(void)
    1.66  {
    1.67 -	struct evtchn_bind_virq bind_virq;
    1.68 -	struct evtchn_bind_ipi  bind_ipi;
    1.69 -	int cpu, pirq, virq, ipi, irq, evtchn;
    1.70 +	int cpu, pirq, irq, evtchn;
    1.71  
    1.72  	init_evtchn_cpu_bindings();
    1.73  
    1.74 @@ -904,66 +960,17 @@ void irq_resume(void)
    1.75  	for (pirq = 0; pirq < NR_PIRQS; pirq++)
    1.76  		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
    1.77  
    1.78 -	/* Secondary CPUs must have no VIRQ or IPI bindings. */
    1.79 -	for_each_possible_cpu(cpu) {
    1.80 -		if (cpu == 0)
    1.81 -			continue;
    1.82 -		for (virq = 0; virq < NR_VIRQS; virq++)
    1.83 -			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
    1.84 -		for (ipi = 0; ipi < NR_IPIS; ipi++)
    1.85 -			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
    1.86 -	}
    1.87 -
    1.88  	/* No IRQ <-> event-channel mappings. */
    1.89  	for (irq = 0; irq < NR_IRQS; irq++)
    1.90  		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
    1.91  	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
    1.92  		evtchn_to_irq[evtchn] = -1;
    1.93  
    1.94 -	/* Primary CPU: rebind VIRQs automatically. */
    1.95 -	for (virq = 0; virq < NR_VIRQS; virq++) {
    1.96 -		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
    1.97 -			continue;
    1.98 -
    1.99 -		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
   1.100 -
   1.101 -		/* Get a new binding from Xen. */
   1.102 -		bind_virq.virq = virq;
   1.103 -		bind_virq.vcpu = 0;
   1.104 -		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
   1.105 -						&bind_virq) != 0)
   1.106 -			BUG();
   1.107 -		evtchn = bind_virq.port;
   1.108 -
   1.109 -		/* Record the new mapping. */
   1.110 -		evtchn_to_irq[evtchn] = irq;
   1.111 -		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
   1.112 -
   1.113 -		/* Ready for use. */
   1.114 -		unmask_evtchn(evtchn);
   1.115 +	for_each_possible_cpu(cpu) {
   1.116 +		restore_cpu_virqs(cpu);
   1.117 +		restore_cpu_ipis(cpu);
   1.118  	}
   1.119  
   1.120 -	/* Primary CPU: rebind IPIs automatically. */
   1.121 -	for (ipi = 0; ipi < NR_IPIS; ipi++) {
   1.122 -		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
   1.123 -			continue;
   1.124 -
   1.125 -		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
   1.126 -
   1.127 -		/* Get a new binding from Xen. */
   1.128 -		bind_ipi.vcpu = 0;
   1.129 -		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
   1.130 -						&bind_ipi) != 0)
   1.131 -			BUG();
   1.132 -		evtchn = bind_ipi.port;
   1.133 -
   1.134 -		/* Record the new mapping. */
   1.135 -		evtchn_to_irq[evtchn] = irq;
   1.136 -		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
   1.137 -
   1.138 -		/* Ready for use. */
   1.139 -		unmask_evtchn(evtchn);
   1.140 -	}
   1.141  }
   1.142  
   1.143  void __init xen_init_IRQ(void)
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Fri Mar 02 12:11:10 2007 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Fri Mar 02 12:11:52 2007 +0000
     2.3 @@ -1,4 +1,3 @@
     2.4 -#define __KERNEL_SYSCALLS__
     2.5  #include <linux/version.h>
     2.6  #include <linux/kernel.h>
     2.7  #include <linux/mm.h>
     2.8 @@ -7,6 +6,7 @@
     2.9  #include <linux/reboot.h>
    2.10  #include <linux/sysrq.h>
    2.11  #include <linux/stringify.h>
    2.12 +#include <linux/stop_machine.h>
    2.13  #include <asm/irq.h>
    2.14  #include <asm/mmu_context.h>
    2.15  #include <xen/evtchn.h>
    2.16 @@ -18,6 +18,7 @@
    2.17  #include <xen/gnttab.h>
    2.18  #include <xen/xencons.h>
    2.19  #include <xen/cpu_hotplug.h>
    2.20 +#include <xen/interface/vcpu.h>
    2.21  
    2.22  #if defined(__i386__) || defined(__x86_64__)
    2.23  
    2.24 @@ -98,7 +99,6 @@ static void post_suspend(int suspend_can
    2.25  		xen_start_info->console.domU.mfn =
    2.26  			pfn_to_mfn(xen_start_info->console.domU.mfn);
    2.27  	} else {
    2.28 -		extern cpumask_t cpu_initialized_map;
    2.29  		cpu_initialized_map = cpumask_of_cpu(0);
    2.30  	}
    2.31  	
    2.32 @@ -133,12 +133,72 @@ static void post_suspend(int suspend_can
    2.33  
    2.34  #endif
    2.35  
    2.36 -int __xen_suspend(void)
    2.37 +static int take_machine_down(void *p_fast_suspend)
    2.38 +{
    2.39 +	int fast_suspend = *(int *)p_fast_suspend;
    2.40 +	int suspend_cancelled, err, cpu;
    2.41 +	extern void time_resume(void);
    2.42 +
    2.43 +	if (fast_suspend) {
    2.44 +		preempt_disable();
    2.45 +	} else {
    2.46 +		for (;;) {
    2.47 +			err = smp_suspend();
    2.48 +			if (err)
    2.49 +				return err;
    2.50 +
    2.51 +			xenbus_suspend();
    2.52 +			preempt_disable();
    2.53 +
    2.54 +			if (num_online_cpus() == 1)
    2.55 +				break;
    2.56 +
    2.57 +			preempt_enable();
    2.58 +			xenbus_suspend_cancel();
    2.59 +		}
    2.60 +	}
    2.61 +
    2.62 +	mm_pin_all();
    2.63 +	local_irq_disable();
    2.64 +	preempt_enable();
    2.65 +	gnttab_suspend();
    2.66 +	pre_suspend();
    2.67 +
    2.68 +	/*
    2.69 +	 * This hypercall returns 1 if suspend was cancelled or the domain was
    2.70 +	 * merely checkpointed, and 0 if it is resuming in a new domain.
    2.71 +	 */
    2.72 +	suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
    2.73 +
    2.74 +	post_suspend(suspend_cancelled);
    2.75 +	gnttab_resume();
    2.76 +	if (!suspend_cancelled)
    2.77 +		irq_resume();
    2.78 +	time_resume();
    2.79 +	switch_idle_mm();
    2.80 +	local_irq_enable();
    2.81 +
    2.82 +	if (fast_suspend && !suspend_cancelled) {
    2.83 +		/*
    2.84 +		 * In fast-suspend mode the APs may not be brought back online
    2.85 +		 * when we resume. In that case we do it here.
    2.86 +		 */
    2.87 +		for_each_online_cpu(cpu) {
    2.88 +			if (cpu == 0)
    2.89 +				continue;
    2.90 +			cpu_set_initialized(cpu);
    2.91 +			err = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    2.92 +			BUG_ON(err);
    2.93 +		}
    2.94 +	}
    2.95 +
    2.96 +	return suspend_cancelled;
    2.97 +}
    2.98 +
    2.99 +int __xen_suspend(int fast_suspend)
   2.100  {
   2.101  	int err, suspend_cancelled;
   2.102  
   2.103 -	extern void time_resume(void);
   2.104 -
   2.105  	BUG_ON(smp_processor_id() != 0);
   2.106  	BUG_ON(in_interrupt());
   2.107  
   2.108 @@ -150,48 +210,17 @@ int __xen_suspend(void)
   2.109  	}
   2.110  #endif
   2.111  
   2.112 -	for (;;) {
   2.113 -		err = smp_suspend();
   2.114 -		if (err)
   2.115 -			return err;
   2.116 -
   2.117 +	if (fast_suspend) {
   2.118  		xenbus_suspend();
   2.119 -		preempt_disable();
   2.120 -
   2.121 -		if (num_online_cpus() == 1)
   2.122 -			break;
   2.123 -
   2.124 -		preempt_enable();
   2.125 -		xenbus_suspend_cancel();
   2.126 +		err = stop_machine_run(take_machine_down, &fast_suspend, 0);
   2.127 +	} else {
   2.128 +		err = take_machine_down(&fast_suspend);
   2.129  	}
   2.130  
   2.131 -	mm_pin_all();
   2.132 -	local_irq_disable();
   2.133 -	preempt_enable();
   2.134 -
   2.135 -	gnttab_suspend();
   2.136 -
   2.137 -	pre_suspend();
   2.138 +	if (err < 0)
   2.139 +		return err;
   2.140  
   2.141 -	/*
   2.142 -	 * This hypercall returns 1 if suspend was cancelled or the domain was
   2.143 -	 * merely checkpointed, and 0 if it is resuming in a new domain.
   2.144 -	 */
   2.145 -	suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
   2.146 -
   2.147 -	post_suspend(suspend_cancelled);
   2.148 -
   2.149 -	gnttab_resume();
   2.150 -
   2.151 -	if (!suspend_cancelled)
   2.152 -		irq_resume();
   2.153 -
   2.154 -	time_resume();
   2.155 -
   2.156 -	switch_idle_mm();
   2.157 -
   2.158 -	local_irq_enable();
   2.159 -
   2.160 +	suspend_cancelled = err;
   2.161  	if (!suspend_cancelled) {
   2.162  		xencons_resume();
   2.163  		xenbus_resume();
   2.164 @@ -199,7 +228,8 @@ int __xen_suspend(void)
   2.165  		xenbus_suspend_cancel();
   2.166  	}
   2.167  
   2.168 -	smp_resume();
   2.169 +	if (!fast_suspend)
   2.170 +		smp_resume();
   2.171  
   2.172 -	return err;
   2.173 +	return 0;
   2.174  }
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Mar 02 12:11:10 2007 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Mar 02 12:11:52 2007 +0000
     3.3 @@ -24,13 +24,16 @@ MODULE_LICENSE("Dual BSD/GPL");
     3.4  /* Ignore multiple shutdown requests. */
     3.5  static int shutting_down = SHUTDOWN_INVALID;
     3.6  
     3.7 +/* Can we leave APs online when we suspend? */
     3.8 +static int fast_suspend;
     3.9 +
    3.10  static void __shutdown_handler(void *unused);
    3.11  static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
    3.12  
    3.13  #ifdef CONFIG_XEN
    3.14 -int __xen_suspend(void);
    3.15 +int __xen_suspend(int fast_suspend);
    3.16  #else
    3.17 -#define __xen_suspend() (void)0
    3.18 +#define __xen_suspend(fast_suspend) 0
    3.19  #endif
    3.20  
    3.21  static int shutdown_process(void *__unused)
    3.22 @@ -44,7 +47,8 @@ static int shutdown_process(void *__unus
    3.23  
    3.24  	if ((shutting_down == SHUTDOWN_POWEROFF) ||
    3.25  	    (shutting_down == SHUTDOWN_HALT)) {
    3.26 -		if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) {
    3.27 +		if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
    3.28 +					envp, 0) < 0) {
    3.29  #ifdef CONFIG_XEN
    3.30  			sys_reboot(LINUX_REBOOT_MAGIC1,
    3.31  				   LINUX_REBOOT_MAGIC2,
    3.32 @@ -61,7 +65,9 @@ static int shutdown_process(void *__unus
    3.33  
    3.34  static int xen_suspend(void *__unused)
    3.35  {
    3.36 -	__xen_suspend();
    3.37 +	int err = __xen_suspend(fast_suspend);
    3.38 +	if (err)
    3.39 +		printk(KERN_ERR "Xen suspend failed (%d)\n", err);
    3.40  	shutting_down = SHUTDOWN_INVALID;
    3.41  	return 0;
    3.42  }
    3.43 @@ -193,6 +199,10 @@ static int setup_shutdown_watcher(struct
    3.44  {
    3.45  	int err;
    3.46  
    3.47 +	xenbus_scanf(XBT_NIL, "control",
    3.48 +		     "platform-feature-multiprocessor-suspend",
    3.49 +		     "%d", &fast_suspend);
    3.50 +
    3.51  	err = register_xenbus_watch(&shutdown_watch);
    3.52  	if (err)
    3.53  		printk(KERN_ERR "Failed to set shutdown watcher\n");
     4.1 --- a/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h	Fri Mar 02 12:11:10 2007 +0000
     4.2 +++ b/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h	Fri Mar 02 12:11:52 2007 +0000
     4.3 @@ -4,6 +4,13 @@
     4.4  #include <linux/kernel.h>
     4.5  #include <linux/cpumask.h>
     4.6  
     4.7 +#if defined(CONFIG_X86)
     4.8 +extern cpumask_t cpu_initialized_map;
     4.9 +#define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map)
    4.10 +#else
    4.11 +#define cpu_set_initialized(cpu) ((void)0)
    4.12 +#endif
    4.13 +
    4.14  #if defined(CONFIG_HOTPLUG_CPU)
    4.15  
    4.16  int cpu_up_check(unsigned int cpu);