ia64/xen-unstable
changeset 14801:5d7fb634ec1a
PV-on-HVM: More save/restore fixes.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Wed Apr 11 09:16:04 2007 +0100 (2007-04-11)
parents   87e2174b8a0d
children  6e7ef794cdbc
files     linux-2.6-xen-sparse/drivers/xen/core/gnttab.c
          unmodified_drivers/linux-2.6/platform-pci/evtchn.c
          unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c
          unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
          unmodified_drivers/linux-2.6/platform-pci/platform-pci.h
line diff
--- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c   Tue Apr 10 20:00:45 2007 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c   Wed Apr 11 09:16:04 2007 +0100
@@ -60,9 +60,6 @@ static grant_ref_t gnttab_free_head;
 static DEFINE_SPINLOCK(gnttab_list_lock);

 static struct grant_entry *shared;
-#ifndef CONFIG_XEN
-static unsigned long resume_frames;
-#endif

 static struct gnttab_free_callback *gnttab_free_callback_list;

@@ -514,6 +511,8 @@ int gnttab_suspend(void)

 #include <platform-pci.h>

+static unsigned long resume_frames;
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
         struct xen_add_to_physmap xatp;
@@ -543,23 +542,17 @@ int gnttab_resume(void)
         if (max_nr_gframes < nr_gframes)
                 return -ENOSYS;

-        resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+        if (!resume_frames) {
+                resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+                shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
+                if (shared == NULL) {
+                        printk("error to ioremap gnttab share frames\n");
+                        return -1;
+                }
+        }

         gnttab_map(0, nr_gframes - 1);

-        shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
-        if (shared == NULL) {
-                printk("error to ioremap gnttab share frames\n");
-                return -1;
-        }
-
-        return 0;
-}
-
-int gnttab_suspend(void)
-{
-        iounmap(shared);
-        resume_frames = 0;
         return 0;
 }
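Note on this hunk: the MMIO hole for the grant-table frames is now allocated and ioremap'd only on the first call to gnttab_resume(), and every later resume merely re-registers the frames with the hypervisor via gnttab_map(); gnttab_suspend() no longer tears the mapping down. The body of gnttab_map() is not shown in this changeset. The sketch below is an illustration only (hypothetical function name, error handling chosen arbitrarily) of how such a loop re-populates grant-table frames with XENMEM_add_to_physmap:

static int gnttab_map_sketch(unsigned int start_idx, unsigned int end_idx)
{
        struct xen_add_to_physmap xatp;
        unsigned int i;

        /* Ask the hypervisor to place each grant-table frame at the
         * guest pseudo-physical addresses reserved by alloc_xen_mmio(). */
        for (i = start_idx; i <= end_idx; i++) {
                xatp.domid = DOMID_SELF;
                xatp.idx   = i;
                xatp.space = XENMAPSPACE_grant_table;
                xatp.gpfn  = (resume_frames >> PAGE_SHIFT) + i;
                if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                        return -EFAULT;
        }

        return 0;
}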
--- a/unmodified_drivers/linux-2.6/platform-pci/evtchn.c   Tue Apr 10 20:00:45 2007 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/evtchn.c   Wed Apr 11 09:16:04 2007 +0100
@@ -28,8 +28,10 @@
  * IN THE SOFTWARE.
  */

+#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <xen/evtchn.h>
 #include <xen/interface/hvm/ioreq.h>
 #include <xen/features.h>
@@ -41,29 +43,37 @@

 void *shared_info_area;

-static DEFINE_MUTEX(irq_evtchn_mutex);
-
 #define is_valid_evtchn(x) ((x) != 0)
 #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn)

 static struct {
+        spinlock_t lock;
         irqreturn_t(*handler) (int, void *, struct pt_regs *);
         void *dev_id;
         int evtchn;
         int close:1; /* close on unbind_from_irqhandler()? */
         int inuse:1;
+        int in_handler:1;
 } irq_evtchn[256];
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
         [0 ... NR_EVENT_CHANNELS-1] = -1 };

-static int find_unbound_irq(void)
+static DEFINE_SPINLOCK(irq_alloc_lock);
+
+static int alloc_xen_irq(void)
 {
         static int warned;
         int irq;

-        for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
-                if (!irq_evtchn[irq].inuse)
-                        return irq;
+        spin_lock(&irq_alloc_lock);
+
+        for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) {
+                if (irq_evtchn[irq].inuse)
+                        continue;
+                irq_evtchn[irq].inuse = 1;
+                spin_unlock(&irq_alloc_lock);
+                return irq;
+        }

         if (!warned) {
                 warned = 1;
@@ -71,9 +81,18 @@ static int find_unbound_irq(void)
                        "increase irq_evtchn[] size in evtchn.c.\n");
         }

+        spin_unlock(&irq_alloc_lock);
+
         return -ENOSPC;
 }

+static void free_xen_irq(int irq)
+{
+        spin_lock(&irq_alloc_lock);
+        irq_evtchn[irq].inuse = 0;
+        spin_unlock(&irq_alloc_lock);
+}
+
 int irq_to_evtchn_port(int irq)
 {
         return irq_evtchn[irq].evtchn;
@@ -93,8 +112,7 @@ void unmask_evtchn(int port)
         shared_info_t *s = shared_info_area;
         vcpu_info_t *vcpu_info;

-        preempt_disable();
-        cpu = smp_processor_id();
+        cpu = get_cpu();
         vcpu_info = &s->vcpu_info[cpu];

         /* Slow path (hypercall) if this is a non-local port.  We only
@@ -103,7 +121,7 @@ void unmask_evtchn(int port)
                 evtchn_unmask_t op = { .port = port };
                 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask,
                                                   &op);
-                preempt_enable();
+                put_cpu();
                 return;
         }

@@ -121,7 +139,8 @@ void unmask_evtchn(int port)
                 if (!vcpu_info->evtchn_upcall_mask)
                         force_evtchn_callback();
         }
-        preempt_enable();
+
+        put_cpu();
 }
 EXPORT_SYMBOL(unmask_evtchn);

@@ -135,20 +154,19 @@ int bind_listening_port_to_irqhandler(
         struct evtchn_alloc_unbound alloc_unbound;
         int err, irq;

-        mutex_lock(&irq_evtchn_mutex);
+        irq = alloc_xen_irq();
+        if (irq < 0)
+                return irq;

-        irq = find_unbound_irq();
-        if (irq < 0) {
-                mutex_unlock(&irq_evtchn_mutex);
-                return irq;
-        }
+        spin_lock_irq(&irq_evtchn[irq].lock);

         alloc_unbound.dom = DOMID_SELF;
         alloc_unbound.remote_dom = remote_domain;
         err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                           &alloc_unbound);
         if (err) {
-                mutex_unlock(&irq_evtchn_mutex);
+                spin_unlock_irq(&irq_evtchn[irq].lock);
+                free_xen_irq(irq);
                 return err;
         }

@@ -156,13 +174,13 @@ int bind_listening_port_to_irqhandler(
         irq_evtchn[irq].dev_id = dev_id;
         irq_evtchn[irq].evtchn = alloc_unbound.port;
         irq_evtchn[irq].close = 1;
-        irq_evtchn[irq].inuse = 1;

         evtchn_to_irq[alloc_unbound.port] = irq;

         unmask_evtchn(alloc_unbound.port);

-        mutex_unlock(&irq_evtchn_mutex);
+        spin_unlock_irq(&irq_evtchn[irq].lock);
+
         return irq;
 }
 EXPORT_SYMBOL(bind_listening_port_to_irqhandler);
@@ -176,34 +194,34 @@ int bind_caller_port_to_irqhandler(
 {
         int irq;

-        mutex_lock(&irq_evtchn_mutex);
+        irq = alloc_xen_irq();
+        if (irq < 0)
+                return irq;

-        irq = find_unbound_irq();
-        if (irq < 0) {
-                mutex_unlock(&irq_evtchn_mutex);
-                return irq;
-        }
+        spin_lock_irq(&irq_evtchn[irq].lock);

         irq_evtchn[irq].handler = handler;
         irq_evtchn[irq].dev_id = dev_id;
         irq_evtchn[irq].evtchn = caller_port;
         irq_evtchn[irq].close = 0;
-        irq_evtchn[irq].inuse = 1;

         evtchn_to_irq[caller_port] = irq;

         unmask_evtchn(caller_port);

-        mutex_unlock(&irq_evtchn_mutex);
+        spin_unlock_irq(&irq_evtchn[irq].lock);
+
         return irq;
 }
 EXPORT_SYMBOL(bind_caller_port_to_irqhandler);

 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
-        int evtchn = evtchn_from_irq(irq);
+        int evtchn;

-        mutex_lock(&irq_evtchn_mutex);
+        spin_lock_irq(&irq_evtchn[irq].lock);
+
+        evtchn = evtchn_from_irq(irq);

         if (is_valid_evtchn(evtchn)) {
                 evtchn_to_irq[irq] = -1;
@@ -216,21 +234,28 @@ void unbind_from_irqhandler(unsigned int

         irq_evtchn[irq].handler = NULL;
         irq_evtchn[irq].evtchn = 0;
-        irq_evtchn[irq].inuse = 0;
+
+        spin_unlock_irq(&irq_evtchn[irq].lock);

-        mutex_unlock(&irq_evtchn_mutex);
+        while (irq_evtchn[irq].in_handler)
+                cpu_relax();
+
+        free_xen_irq(irq);
 }
 EXPORT_SYMBOL(unbind_from_irqhandler);

 void notify_remote_via_irq(int irq)
 {
-        int evtchn = evtchn_from_irq(irq);
+        int evtchn;
+
+        evtchn = evtchn_from_irq(irq);
         if (is_valid_evtchn(evtchn))
                 notify_remote_via_evtchn(evtchn);
 }
 EXPORT_SYMBOL(notify_remote_via_irq);

-irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t evtchn_interrupt(int irq, void *dev_id,
+                                    struct pt_regs *regs)
 {
         unsigned int l1i, port;
         /* XXX: All events are bound to vcpu0 but irq may be redirected. */
@@ -249,13 +274,30 @@ irqreturn_t evtchn_interrupt(int irq, vo
                 while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i])) {
                         port = (l1i * BITS_PER_LONG) + __ffs(l2);
                         synch_clear_bit(port, &s->evtchn_pending[0]);
+
                         irq = evtchn_to_irq[port];
-                        if ((irq >= 0) &&
-                            ((handler = irq_evtchn[irq].handler) != NULL))
-                                handler(irq, irq_evtchn[irq].dev_id, regs);
-                        else
-                                printk(KERN_WARNING "unexpected event channel "
-                                       "upcall on port %d!\n", port);
+                        if (irq < 0)
+                                continue;
+
+                        spin_lock(&irq_evtchn[irq].lock);
+                        handler = irq_evtchn[irq].handler;
+                        dev_id = irq_evtchn[irq].dev_id;
+                        if (unlikely(handler == NULL)) {
+                                printk("Xen IRQ%d (port %d) has no handler!\n",
+                                       irq, port);
+                                spin_unlock(&irq_evtchn[irq].lock);
+                                continue;
+                        }
+                        irq_evtchn[irq].in_handler = 1;
+                        spin_unlock(&irq_evtchn[irq].lock);
+
+                        local_irq_enable();
+                        handler(irq, irq_evtchn[irq].dev_id, regs);
+                        local_irq_disable();
+
+                        spin_lock(&irq_evtchn[irq].lock);
+                        irq_evtchn[irq].in_handler = 0;
+                        spin_unlock(&irq_evtchn[irq].lock);
                 }
         }

@@ -268,16 +310,6 @@ void force_evtchn_callback(void)
 }
 EXPORT_SYMBOL(force_evtchn_callback);

-void irq_suspend(void)
-{
-        mutex_lock(&irq_evtchn_mutex);
-}
-
-void irq_suspend_cancel(void)
-{
-        mutex_unlock(&irq_evtchn_mutex);
-}
-
 void irq_resume(void)
 {
         int evtchn, irq;
@@ -289,6 +321,16 @@ void irq_resume(void)

         for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
                 irq_evtchn[irq].evtchn = 0;
+}

-        mutex_unlock(&irq_evtchn_mutex);
+int xen_irq_init(struct pci_dev *pdev)
+{
+        int irq;
+
+        for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
+                spin_lock_init(&irq_evtchn[irq].lock);
+
+        return request_irq(pdev->irq, evtchn_interrupt,
+                           SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT,
+                           "xen-platform-pci", pdev);
 }
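Note on this hunk: the per-IRQ spinlock plus the in_handler bit replace the old irq_evtchn_mutex. The interrupt path snapshots the handler under the lock, marks the slot busy, and runs the callback with the lock dropped; unbind_from_irqhandler() unpublishes the binding under the lock and then spins until in_handler clears before the slot is recycled. A minimal standalone sketch of the same quiesce handshake (hypothetical names, not this driver's API) looks like this:

#include <linux/spinlock.h>

struct slot {
        spinlock_t lock;
        void (*handler)(void *data);
        void *data;
        int in_handler;
};

/* Interrupt side: snapshot the callback under the lock, mark the slot
 * busy, then invoke the callback with the lock dropped. */
static void deliver(struct slot *s)
{
        void (*fn)(void *data);
        void *data;

        spin_lock(&s->lock);
        fn = s->handler;
        data = s->data;
        if (fn)
                s->in_handler = 1;
        spin_unlock(&s->lock);

        if (!fn)
                return;

        fn(data);

        spin_lock(&s->lock);
        s->in_handler = 0;
        spin_unlock(&s->lock);
}

/* Teardown side: unpublish the callback, then wait for any in-flight
 * invocation to drain before the slot may be reused. */
static void teardown(struct slot *s)
{
        spin_lock_irq(&s->lock);
        s->handler = NULL;
        spin_unlock_irq(&s->lock);

        while (s->in_handler)
                cpu_relax();
}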
--- a/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c   Tue Apr 10 20:00:45 2007 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c   Wed Apr 11 09:16:04 2007 +0100
@@ -1,24 +1,81 @@
 #include <linux/config.h>
+#include <linux/stop_machine.h>
+#include <xen/evtchn.h>
+#include <xen/gnttab.h>
 #include <xen/xenbus.h>
 #include "platform-pci.h"
 #include <asm/hypervisor.h>

-int __xen_suspend(int fast_suspend)
+/*
+ * Spinning prevents, for example, APs touching grant table entries while
+ * the shared grant table is not mapped into the address space imemdiately
+ * after resume.
+ */
+static void ap_suspend(void *_ap_spin)
+{
+        int *ap_spin = _ap_spin;
+
+        BUG_ON(!irqs_disabled());
+
+        while (*ap_spin) {
+                cpu_relax();
+                HYPERVISOR_yield();
+        }
+}
+
+static int bp_suspend(void)
 {
         int suspend_cancelled;

-        xenbus_suspend();
-        platform_pci_suspend();
+        BUG_ON(!irqs_disabled());

         suspend_cancelled = HYPERVISOR_shutdown(SHUTDOWN_suspend);

-        if (suspend_cancelled) {
-                platform_pci_suspend_cancel();
+        if (!suspend_cancelled) {
+                platform_pci_resume();
+                gnttab_resume();
+                irq_resume();
+        }
+
+        return suspend_cancelled;
+}
+
+int __xen_suspend(int fast_suspend)
+{
+        int err, suspend_cancelled, ap_spin;
+
+        xenbus_suspend();
+
+        preempt_disable();
+
+        /* Prevent any races with evtchn_interrupt() handler. */
+        disable_irq(xen_platform_pdev->irq);
+
+        ap_spin = 1;
+        smp_mb();
+
+        err = smp_call_function(ap_suspend, &ap_spin, 0, 0);
+        if (err < 0) {
+                preempt_enable();
                 xenbus_suspend_cancel();
-        } else {
-                platform_pci_resume();
+                return err;
+        }
+
+        local_irq_disable();
+        suspend_cancelled = bp_suspend();
+        local_irq_enable();
+
+        smp_mb();
+        ap_spin = 0;
+
+        enable_irq(xen_platform_pdev->irq);
+
+        preempt_enable();
+
+        if (!suspend_cancelled)
                 xenbus_resume();
-        }
+        else
+                xenbus_suspend_cancel();

         return 0;
 }
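Note on this hunk: the boot processor parks the other CPUs via smp_call_function() before issuing SHUTDOWN_suspend, restores the platform PCI mappings, grant table, and event channels in bp_suspend(), and only then releases the spinning APs. A generic sketch of that rendezvous pattern, with hypothetical names and the 2.6.18-era four-argument smp_call_function() (this is an illustration, not the committed code):

/* Runs on every AP via smp_call_function(); spins until the boot CPU
 * has finished its suspend/resume critical section. */
static void ap_park(void *_gate)
{
        volatile int *gate = _gate;

        while (*gate)
                cpu_relax();
}

static int run_on_bp_with_aps_parked(int (*bp_work)(void))
{
        static int gate;
        int ret;

        preempt_disable();
        gate = 1;
        smp_mb();

        /* wait = 0: APs enter ap_park() asynchronously and stay there. */
        if (smp_call_function(ap_park, &gate, 0, 0) < 0) {
                preempt_enable();
                return -EIO;
        }

        local_irq_disable();
        ret = bp_work();
        local_irq_enable();

        smp_mb();
        gate = 0;

        preempt_enable();
        return ret;
}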
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c   Tue Apr 10 20:00:45 2007 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c   Wed Apr 11 09:16:04 2007 +0100
@@ -40,7 +40,6 @@
 #include <xen/interface/hvm/params.h>
 #include <xen/features.h>
 #include <xen/evtchn.h>
-#include <xen/gnttab.h>
 #ifdef __ia64__
 #include <asm/xen/xencomm.h>
 #endif
@@ -62,6 +61,8 @@ MODULE_AUTHOR("ssmith@xensource.com");
 MODULE_DESCRIPTION("Xen platform PCI device");
 MODULE_LICENSE("GPL");

+struct pci_dev *xen_platform_pdev;
+
 static unsigned long shared_info_frame;
 static uint64_t callback_via;

@@ -89,8 +90,6 @@ static int __devinit init_xen_info(void)
         if (shared_info_area == NULL)
                 panic("can't map shared info\n");

-        gnttab_init();
-
         return 0;
 }

@@ -199,8 +198,10 @@ static int set_callback_via(uint64_t via
         return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
 }

+int xen_irq_init(struct pci_dev *pdev);
 int xenbus_init(void);
 int xen_reboot_init(void);
+int gnttab_init(void);

 static int __devinit platform_pci_init(struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
@@ -209,6 +210,10 @@ static int __devinit platform_pci_init(s
         long ioaddr, iolen;
         long mmio_addr, mmio_len;

+        if (xen_platform_pdev)
+                return -EBUSY;
+        xen_platform_pdev = pdev;
+
         i = pci_enable_device(pdev);
         if (i)
                 return i;
@@ -249,9 +254,10 @@ static int __devinit platform_pci_init(s
         if ((ret = init_xen_info()))
                 goto out;

-        if ((ret = request_irq(pdev->irq, evtchn_interrupt,
-                               SA_SHIRQ | SA_SAMPLE_RANDOM,
-                               "xen-platform-pci", pdev)))
+        if ((ret = gnttab_init()))
+                goto out;
+
+        if ((ret = xen_irq_init(pdev)))
                 goto out;

         if ((ret = set_callback_via(callback_via)))
@@ -292,18 +298,6 @@ static struct pci_driver platform_driver

 static int pci_device_registered;

-void platform_pci_suspend(void)
-{
-        gnttab_suspend();
-        irq_suspend();
-}
-
-void platform_pci_suspend_cancel(void)
-{
-        irq_suspend_cancel();
-        gnttab_resume();
-}
-
 void platform_pci_resume(void)
 {
         struct xen_add_to_physmap xatp;
@@ -319,12 +313,8 @@ void platform_pci_resume(void)
         if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                 BUG();

-        irq_resume();
-
         if (set_callback_via(callback_via))
                 printk("platform_pci_resume failure!\n");
-
-        gnttab_resume();
 }

 static int __init platform_pci_module_init(void)
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h   Tue Apr 10 20:00:45 2007 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h   Wed Apr 11 09:16:04 2007 +0100
@@ -22,16 +22,11 @@
 #ifndef _XEN_PLATFORM_PCI_H
 #define _XEN_PLATFORM_PCI_H

-#include <linux/interrupt.h>
+#include <linux/pci.h>

 unsigned long alloc_xen_mmio(unsigned long len);
-int gnttab_init(void);
-irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-void irq_suspend(void);
-void irq_suspend_cancel(void);
-
-void platform_pci_suspend(void);
-void platform_pci_suspend_cancel(void);
 void platform_pci_resume(void);

+extern struct pci_dev *xen_platform_pdev;
+
 #endif /* _XEN_PLATFORM_PCI_H */