ia64/xen-unstable

unmodified_drivers/linux-2.6/platform-pci/platform-pci.c @ 17049:27314cfbcefe

pv-on-hvm: Signal crash to Xen tools when HVM guest panics.

The attached patch adds a function that automatically dumps a core file
when a Linux guest in an HVM domain panics, in the same way as for a PV
domain.

I tested this patch with kernels 2.6.9 and 2.6.18 on both x86 and ia64
(to build for ia64, some patches from the ia64 tree are needed) using
the following steps, and confirmed that it works:

1. Build xen-platform-pci.ko.
2. In /etc/xen/xend-config.sxp, set (enable-dump yes).
3. On the guest Linux, load the module:
   # insmod xen-platform-pci.ko
4. When the guest Linux panics, a core file is dumped.

Signed-off-by: Tetsu Yamamoto <yamamoto.tetsu@jp.fujitsu.com>
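
For reference, xen_panic_handler_init() itself lives outside this file; the
following is a minimal, illustrative sketch (not the actual implementation,
and the helper names xen_panic_event/xen_panic_block are made up) of how such
a handler can signal the crash to Xen, assuming the standard Linux panic
notifier chain and the SHUTDOWN_crash reason from xen/interface/sched.h:

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <xen/interface/sched.h>   /* SHUTDOWN_crash */
    #include <asm/hypervisor.h>

    /* Illustrative only: mark the domain as crashed so the Xen tools can
     * dump a core file when (enable-dump yes) is configured. */
    static int xen_panic_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            HYPERVISOR_shutdown(SHUTDOWN_crash);
            return NOTIFY_DONE; /* not reached */
    }

    static struct notifier_block xen_panic_block = {
            .notifier_call = xen_panic_event,
    };

    int xen_panic_handler_init(void)
    {
            atomic_notifier_chain_register(&panic_notifier_list,
                                           &xen_panic_block);
            return 0;
    }

On kernels older than 2.6.17 the pre-atomic notifier_chain_register()
interface would be used instead.
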
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 13 10:42:09 2008 +0000 (2008-02-13)
parents 3d97c1c1f7c8
children 8d993552673a
line source
/******************************************************************************
 * platform-pci.c
 *
 * Xen platform PCI device driver
 * Copyright (c) 2005, Intel Corporation.
 * Copyright (c) 2007, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/evtchn.h>
#ifdef __ia64__
#include <asm/xen/xencomm.h>
#endif

#include "platform-pci.h"

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

#define DRV_NAME "xen-platform-pci"
#define DRV_VERSION "0.10"
#define DRV_RELDATE "03/03/2005"

static int max_hypercall_stub_pages, nr_hypercall_stub_pages;
char *hypercall_stubs;
EXPORT_SYMBOL(hypercall_stubs);

MODULE_AUTHOR("ssmith@xensource.com");
MODULE_DESCRIPTION("Xen platform PCI device");
MODULE_LICENSE("GPL");

struct pci_dev *xen_platform_pdev;

static unsigned long shared_info_frame;
static uint64_t callback_via;
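
/*
 * Map this domain's shared_info page: steal one page of the platform
 * device's MMIO range, ask Xen to place the shared_info frame there
 * (XENMEM_add_to_physmap with XENMAPSPACE_shared_info), then ioremap it
 * so the kernel can reach event-channel and time information.
 */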
static int __devinit init_xen_info(void)
{
        struct xen_add_to_physmap xatp;
        extern void *shared_info_area;

#ifdef __ia64__
        xencomm_initialize();
#endif

        setup_xen_features();

        shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT;
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = shared_info_frame;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        shared_info_area =
                ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE);
        if (shared_info_area == NULL)
                panic("can't map shared info\n");

        return 0;
}

static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
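
/*
 * Simple bump allocator that hands out chunks of the platform device's
 * MMIO BAR (resource 1). Allocations are never freed; over-allocation
 * is a bug.
 */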
unsigned long alloc_xen_mmio(unsigned long len)
{
        unsigned long addr;

        addr = platform_mmio + platform_mmio_alloc;
        platform_mmio_alloc += len;
        BUG_ON(platform_mmio_alloc > platform_mmiolen);

        return addr;
}

#ifndef __ia64__
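
/*
 * Xen advertises itself to HVM guests through CPUID leaves 0x40000000+:
 * leaf 0x40000000 returns the "XenVMMXenVMM" signature, 0x40000001 the
 * Xen version, and 0x40000002 the number of hypercall pages plus the MSR
 * used to install them. Writing (pfn << PAGE_SHIFT) + index to that MSR
 * makes Xen fill the given guest page with hypercall trampolines.
 */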
static int init_hypercall_stubs(void)
{
        uint32_t eax, ebx, ecx, edx, pages, msr, i;
        char signature[13];

        cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
        *(uint32_t*)(signature + 0) = ebx;
        *(uint32_t*)(signature + 4) = ecx;
        *(uint32_t*)(signature + 8) = edx;
        signature[12] = 0;

        if (strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002)) {
                printk(KERN_WARNING
                       "Detected Xen platform device but not Xen VMM?"
                       " (sig %s, eax %x)\n",
                       signature, eax);
                return -EINVAL;
        }

        cpuid(0x40000001, &eax, &ebx, &ecx, &edx);

        printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff);

        /*
         * Find largest supported number of hypercall pages.
         * We'll create as many as possible up to this number.
         */
        cpuid(0x40000002, &pages, &msr, &ecx, &edx);

        /*
         * Use __vmalloc() because vmalloc_exec() is not an exported symbol.
         * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL.
         *   hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE);
         */
        while (pages > 0) {
                hypercall_stubs = __vmalloc(
                        pages * PAGE_SIZE,
                        GFP_KERNEL | __GFP_HIGHMEM,
                        __pgprot(__PAGE_KERNEL & ~_PAGE_NX));
                if (hypercall_stubs != NULL)
                        break;
                pages--; /* vmalloc failed: try one fewer pages */
        }

        if (hypercall_stubs == NULL)
                return -ENOMEM;

        for (i = 0; i < pages; i++) {
                unsigned long pfn;
                pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE);
                wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);
        }

        nr_hypercall_stub_pages = pages;
        max_hypercall_stub_pages = pages;

        printk(KERN_INFO "Hypercall area is %u pages.\n", pages);

        return 0;
}

static void resume_hypercall_stubs(void)
{
        uint32_t eax, ebx, ecx, edx, pages, msr, i;
        char signature[13];

        cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
        *(uint32_t*)(signature + 0) = ebx;
        *(uint32_t*)(signature + 4) = ecx;
        *(uint32_t*)(signature + 8) = edx;
        signature[12] = 0;

        BUG_ON(strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002));

        cpuid(0x40000002, &pages, &msr, &ecx, &edx);

        if (pages > max_hypercall_stub_pages)
                pages = max_hypercall_stub_pages;

        for (i = 0; i < pages; i++) {
                unsigned long pfn;
                pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE);
                wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);
        }

        nr_hypercall_stub_pages = pages;
}

#else /* __ia64__ */

#define init_hypercall_stubs() (0)
#define resume_hypercall_stubs() ((void)0)

#endif
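
/*
 * Compute the "callback via" value handed to HVM_PARAM_CALLBACK_IRQ:
 * either a plain ISA IRQ number, or a type-1 value (bit 56 set) encoding
 * the PCI segment/bus/device and INTx pin of the platform device, telling
 * Xen which line to raise for event-channel upcalls.
 */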
static uint64_t get_callback_via(struct pci_dev *pdev)
{
        u8 pin;
        int irq;

#ifdef __ia64__
        for (irq = 0; irq < 16; irq++) {
                if (isa_irq_to_vector(irq) == pdev->irq)
                        return irq; /* ISA IRQ */
        }
#else /* !__ia64__ */
        irq = pdev->irq;
        if (irq < 16)
                return irq; /* ISA IRQ */
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
        pin = pdev->pin;
#else
        pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
#endif

        /* We don't know the GSI. Specify the PCI INTx line instead. */
        return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */
                ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
                ((uint64_t)pdev->bus->number << 16) |
                ((uint64_t)(pdev->devfn & 0xff) << 8) |
                ((uint64_t)(pin - 1) & 3));
}

static int set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;

        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}

int xen_irq_init(struct pci_dev *pdev);
int xenbus_init(void);
int xen_reboot_init(void);
int xen_panic_handler_init(void);
int gnttab_init(void);
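
/*
 * Probe routine: enable the device, claim its I/O and MMIO BARs, set up
 * the hypercall stubs and shared_info mapping, then bring up grant tables,
 * the event-channel IRQ and callback, xenbus, and the reboot/panic handlers.
 */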
static int __devinit platform_pci_init(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        int i, ret;
        long ioaddr, iolen;
        long mmio_addr, mmio_len;

        if (xen_platform_pdev)
                return -EBUSY;
        xen_platform_pdev = pdev;

        i = pci_enable_device(pdev);
        if (i)
                return i;

        ioaddr = pci_resource_start(pdev, 0);
        iolen = pci_resource_len(pdev, 0);

        mmio_addr = pci_resource_start(pdev, 1);
        mmio_len = pci_resource_len(pdev, 1);

        callback_via = get_callback_via(pdev);

        if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) {
                printk(KERN_WARNING DRV_NAME ":no resources found\n");
                return -ENOENT;
        }

        if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
                printk(KERN_ERR DRV_NAME ":MEM I/O resource 0x%lx @ 0x%lx busy\n",
                       mmio_addr, mmio_len);
                return -EBUSY;
        }

        if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
                printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n",
                       iolen, ioaddr);
                release_mem_region(mmio_addr, mmio_len);
                return -EBUSY;
        }

        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;

        ret = init_hypercall_stubs();
        if (ret < 0)
                goto out;

        if ((ret = init_xen_info()))
                goto out;

        if ((ret = gnttab_init()))
                goto out;

        if ((ret = xen_irq_init(pdev)))
                goto out;

        if ((ret = set_callback_via(callback_via)))
                goto out;

        if ((ret = xenbus_init()))
                goto out;

        if ((ret = xen_reboot_init()))
                goto out;

        if ((ret = xen_panic_handler_init()))
                goto out;

 out:
        if (ret) {
                release_mem_region(mmio_addr, mmio_len);
                release_region(ioaddr, iolen);
        }

        return ret;
}

#define XEN_PLATFORM_VENDOR_ID 0x5853
#define XEN_PLATFORM_DEVICE_ID 0x0001
static struct pci_device_id platform_pci_tbl[] __devinitdata = {
        {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        /* Continue to recognise the old ID for now */
        {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, platform_pci_tbl);

static struct pci_driver platform_driver = {
        .name     = DRV_NAME,
        .probe    = platform_pci_init,
        .id_table = platform_pci_tbl,
};

static int pci_device_registered;
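
/*
 * Re-establish Xen state after save/restore: the hypercall stub pages must
 * be re-registered with the (possibly new) hypervisor, the shared_info page
 * re-mapped, and the event-channel callback IRQ parameter set again.
 */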
void platform_pci_resume(void)
{
        struct xen_add_to_physmap xatp;

        resume_hypercall_stubs();

        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = shared_info_frame;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        if (set_callback_via(callback_via))
                printk("platform_pci_resume failure!\n");
}

static int __init platform_pci_module_init(void)
{
        int rc;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
        rc = pci_module_init(&platform_driver);
#else
        rc = pci_register_driver(&platform_driver);
#endif
        if (rc) {
                printk(KERN_INFO DRV_NAME
                       ": No platform pci device model found\n");
                return rc;
        }

        pci_device_registered = 1;
        return 0;
}

module_init(platform_pci_module_init);