direct-io.hg
changeset 10802:306d7857928c
[IA64] Save & restore.
xc_ia64_linux_save.c and xc_ia64_linux_restore.c added.
vcpu context has more registers and states (e.g., tr registers).
Per-cpu irqs are deallocated when a cpu is switched off.
#if/#endif added in reboot.c for ia64.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xc_ia64_linux_save.c and xc_ia64_linux_restore.c added.
vcpu context has more registers and states (e.g., tr registers).
Per-cpu irqs are deallocated when a cpu is switched off.
#if/#endif added in reboot.c for ia64.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
line diff
1.1 --- a/linux-2.6-xen-sparse/arch/ia64/Kconfig Tue Jul 11 11:29:25 2006 -0600 1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig Tue Jul 11 12:51:18 2006 -0600 1.3 @@ -518,7 +518,7 @@ config XEN_DEVMEM 1.4 default n 1.5 1.6 config XEN_REBOOT 1.7 - default n 1.8 + default y 1.9 1.10 config XEN_SMPBOOT 1.11 default n
2.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c Tue Jul 11 11:29:25 2006 -0600 2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c Tue Jul 11 12:51:18 2006 -0600 2.3 @@ -31,6 +31,9 @@ 2.4 #include <linux/smp_lock.h> 2.5 #include <linux/threads.h> 2.6 #include <linux/bitops.h> 2.7 +#ifdef CONFIG_XEN 2.8 +#include <linux/cpu.h> 2.9 +#endif 2.10 2.11 #include <asm/delay.h> 2.12 #include <asm/intrinsics.h> 2.13 @@ -235,6 +238,9 @@ static struct irqaction ipi_irqaction = 2.14 #include <xen/evtchn.h> 2.15 #include <xen/interface/callback.h> 2.16 2.17 +static DEFINE_PER_CPU(int, timer_irq) = -1; 2.18 +static DEFINE_PER_CPU(int, ipi_irq) = -1; 2.19 +static DEFINE_PER_CPU(int, resched_irq) = -1; 2.20 static char timer_name[NR_CPUS][15]; 2.21 static char ipi_name[NR_CPUS][15]; 2.22 static char resched_name[NR_CPUS][15]; 2.23 @@ -294,6 +300,7 @@ xen_register_percpu_irq (unsigned int ir 2.24 ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu, 2.25 action->handler, action->flags, 2.26 timer_name[cpu], action->dev_id); 2.27 + per_cpu(timer_irq,cpu) = ret; 2.28 printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret); 2.29 break; 2.30 case IA64_IPI_RESCHEDULE: 2.31 @@ -301,6 +308,7 @@ xen_register_percpu_irq (unsigned int ir 2.32 ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu, 2.33 action->handler, action->flags, 2.34 resched_name[cpu], action->dev_id); 2.35 + per_cpu(resched_irq,cpu) = ret; 2.36 printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret); 2.37 break; 2.38 case IA64_IPI_VECTOR: 2.39 @@ -308,6 +316,7 @@ xen_register_percpu_irq (unsigned int ir 2.40 ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu, 2.41 action->handler, action->flags, 2.42 ipi_name[cpu], action->dev_id); 2.43 + per_cpu(ipi_irq,cpu) = ret; 2.44 printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret); 2.45 break; 2.46 case IA64_SPURIOUS_INT_VECTOR: 2.47 @@ -343,7 +352,7 @@ xen_bind_early_percpu_irq 
(void) 2.48 */ 2.49 for (i = 0; i < late_irq_cnt; i++) 2.50 xen_register_percpu_irq(saved_percpu_irqs[i].irq, 2.51 - saved_percpu_irqs[i].action, 0); 2.52 + saved_percpu_irqs[i].action, 0); 2.53 } 2.54 2.55 /* FIXME: There's no obvious point to check whether slab is ready. So 2.56 @@ -353,6 +362,38 @@ extern void (*late_time_init)(void); 2.57 extern char xen_event_callback; 2.58 extern void xen_init_IRQ(void); 2.59 2.60 +#ifdef CONFIG_HOTPLUG_CPU 2.61 +static int __devinit 2.62 +unbind_evtchn_callback(struct notifier_block *nfb, 2.63 + unsigned long action, void *hcpu) 2.64 +{ 2.65 + unsigned int cpu = (unsigned long)hcpu; 2.66 + 2.67 + if (action == CPU_DEAD) { 2.68 + /* Unregister evtchn. */ 2.69 + if (per_cpu(ipi_irq,cpu) >= 0) { 2.70 + unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL); 2.71 + per_cpu(ipi_irq, cpu) = -1; 2.72 + } 2.73 + if (per_cpu(resched_irq,cpu) >= 0) { 2.74 + unbind_from_irqhandler (per_cpu(resched_irq, cpu), 2.75 + NULL); 2.76 + per_cpu(resched_irq, cpu) = -1; 2.77 + } 2.78 + if (per_cpu(timer_irq,cpu) >= 0) { 2.79 + unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL); 2.80 + per_cpu(timer_irq, cpu) = -1; 2.81 + } 2.82 + } 2.83 + return NOTIFY_OK; 2.84 +} 2.85 + 2.86 +static struct notifier_block unbind_evtchn_notifier = { 2.87 + .notifier_call = unbind_evtchn_callback, 2.88 + .priority = 0 2.89 +}; 2.90 +#endif 2.91 + 2.92 DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]); 2.93 void xen_smp_intr_init(void) 2.94 { 2.95 @@ -363,21 +404,22 @@ void xen_smp_intr_init(void) 2.96 .type = CALLBACKTYPE_event, 2.97 .address = (unsigned long)&xen_event_callback, 2.98 }; 2.99 - static cpumask_t registered_cpumask; 2.100 2.101 - if (!cpu) 2.102 + if (cpu == 0) { 2.103 + /* Initialization was already done for boot cpu. */ 2.104 +#ifdef CONFIG_HOTPLUG_CPU 2.105 + /* Register the notifier only once. 
*/ 2.106 + register_cpu_notifier(&unbind_evtchn_notifier); 2.107 +#endif 2.108 return; 2.109 + } 2.110 2.111 /* This should be piggyback when setup vcpu guest context */ 2.112 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); 2.113 2.114 - if (!cpu_isset(cpu, registered_cpumask)) { 2.115 - cpu_set(cpu, registered_cpumask); 2.116 - for (i = 0; i < saved_irq_cnt; i++) 2.117 - xen_register_percpu_irq(saved_percpu_irqs[i].irq, 2.118 - saved_percpu_irqs[i].action, 2.119 - 0); 2.120 - } 2.121 + for (i = 0; i < saved_irq_cnt; i++) 2.122 + xen_register_percpu_irq(saved_percpu_irqs[i].irq, 2.123 + saved_percpu_irqs[i].action, 0); 2.124 #endif /* CONFIG_SMP */ 2.125 } 2.126 #endif /* CONFIG_XEN */ 2.127 @@ -388,12 +430,13 @@ register_percpu_irq (ia64_vector vec, st 2.128 irq_desc_t *desc; 2.129 unsigned int irq; 2.130 2.131 +#ifdef CONFIG_XEN 2.132 + if (is_running_on_xen()) 2.133 + return xen_register_percpu_irq(vec, action, 1); 2.134 +#endif 2.135 + 2.136 for (irq = 0; irq < NR_IRQS; ++irq) 2.137 if (irq_to_vector(irq) == vec) { 2.138 -#ifdef CONFIG_XEN 2.139 - if (is_running_on_xen()) 2.140 - return xen_register_percpu_irq(vec, action, 1); 2.141 -#endif 2.142 desc = irq_descp(irq); 2.143 desc->status |= IRQ_PER_CPU; 2.144 desc->handler = &irq_type_ia64_lsapic; 2.145 @@ -406,6 +449,8 @@ void __init 2.146 init_IRQ (void) 2.147 { 2.148 #ifdef CONFIG_XEN 2.149 + printk(KERN_INFO "init_IRQ called from %p\n", 2.150 + __builtin_return_address (0)); 2.151 /* Maybe put into platform_irq_init later */ 2.152 if (is_running_on_xen()) { 2.153 struct callback_register event = {
3.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c Tue Jul 11 11:29:25 2006 -0600 3.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c Tue Jul 11 12:51:18 2006 -0600 3.3 @@ -795,3 +795,13 @@ direct_remap_pfn_range(struct vm_area_st 3.4 return error; 3.5 } 3.6 3.7 + 3.8 +/* Called after suspend, to resume time. */ 3.9 +void 3.10 +time_resume(void) 3.11 +{ 3.12 + extern void ia64_cpu_local_tick(void); 3.13 + 3.14 + /* Just trigger a tick. */ 3.15 + ia64_cpu_local_tick(); 3.16 +}
4.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S Tue Jul 11 11:29:25 2006 -0600 4.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S Tue Jul 11 12:51:18 2006 -0600 4.3 @@ -33,3 +33,23 @@ GLOBAL_ENTRY(early_xen_setup) 4.4 br.ret.sptk.many rp 4.5 ;; 4.6 END(early_xen_setup) 4.7 + 4.8 +#include <xen/interface/xen.h> 4.9 + 4.10 +/* Stub for suspend. 4.11 + Just force the stacked registers to be written in memory. */ 4.12 +GLOBAL_ENTRY(HYPERVISOR_suspend) 4.13 + alloc r20=ar.pfs,0,0,0,0 4.14 + mov r14=2 4.15 + mov r15=r12 4.16 + ;; 4.17 + /* We don't want to deal with RSE. */ 4.18 + flushrs 4.19 + mov r2=__HYPERVISOR_sched_op 4.20 + st4 [r12]=r14 4.21 + ;; 4.22 + break 0x1000 4.23 + ;; 4.24 + mov ar.pfs=r20 4.25 + br.ret.sptk.many b0 4.26 +END(HYPERVISOR_suspend)
5.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c Tue Jul 11 11:29:25 2006 -0600 5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c Tue Jul 11 12:51:18 2006 -0600 5.3 @@ -39,6 +39,7 @@ extern void ctrl_alt_del(void); 5.4 */ 5.5 #define SHUTDOWN_HALT 4 5.6 5.7 +#if defined(__i386__) || defined(__x86_64__) 5.8 void machine_emergency_restart(void) 5.9 { 5.10 /* We really want to get pending console data out before we die. */ 5.11 @@ -60,10 +61,8 @@ void machine_power_off(void) 5.12 { 5.13 /* We really want to get pending console data out before we die. */ 5.14 xencons_force_flush(); 5.15 -#if defined(__i386__) || defined(__x86_64__) 5.16 if (pm_power_off) 5.17 pm_power_off(); 5.18 -#endif 5.19 HYPERVISOR_shutdown(SHUTDOWN_poweroff); 5.20 } 5.21 5.22 @@ -71,7 +70,7 @@ int reboot_thru_bios = 0; /* for dmi_sca 5.23 EXPORT_SYMBOL(machine_restart); 5.24 EXPORT_SYMBOL(machine_halt); 5.25 EXPORT_SYMBOL(machine_power_off); 5.26 - 5.27 +#endif 5.28 5.29 /****************************************************************************** 5.30 * Stop/pickle callback handling. 5.31 @@ -82,6 +81,7 @@ static int shutting_down = SHUTDOWN_INVA 5.32 static void __shutdown_handler(void *unused); 5.33 static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL); 5.34 5.35 +#if defined(__i386__) || defined(__x86_64__) 5.36 /* Ensure we run on the idle task page tables so that we will 5.37 switch page tables before running user space. 
This is needed 5.38 on architectures with separate kernel and user page tables 5.39 @@ -98,25 +98,30 @@ static void switch_idle_mm(void) 5.40 current->active_mm = &init_mm; 5.41 mmdrop(mm); 5.42 } 5.43 +#endif 5.44 5.45 static int __do_suspend(void *ignore) 5.46 { 5.47 - int i, j, k, fpp, err; 5.48 - 5.49 + int err; 5.50 +#if defined(__i386__) || defined(__x86_64__) 5.51 + int i, j, k, fpp; 5.52 extern unsigned long max_pfn; 5.53 extern unsigned long *pfn_to_mfn_frame_list_list; 5.54 extern unsigned long *pfn_to_mfn_frame_list[]; 5.55 +#endif 5.56 5.57 extern void time_resume(void); 5.58 5.59 BUG_ON(smp_processor_id() != 0); 5.60 BUG_ON(in_interrupt()); 5.61 5.62 +#if defined(__i386__) || defined(__x86_64__) 5.63 if (xen_feature(XENFEAT_auto_translated_physmap)) { 5.64 printk(KERN_WARNING "Cannot suspend in " 5.65 "auto_translated_physmap mode.\n"); 5.66 return -EOPNOTSUPP; 5.67 } 5.68 +#endif 5.69 5.70 err = smp_suspend(); 5.71 if (err) 5.72 @@ -129,18 +134,24 @@ static int __do_suspend(void *ignore) 5.73 #ifdef __i386__ 5.74 kmem_cache_shrink(pgd_cache); 5.75 #endif 5.76 +#if defined(__i386__) || defined(__x86_64__) 5.77 mm_pin_all(); 5.78 5.79 __cli(); 5.80 +#elif defined(__ia64__) 5.81 + local_irq_disable(); 5.82 +#endif 5.83 preempt_enable(); 5.84 5.85 gnttab_suspend(); 5.86 5.87 +#if defined(__i386__) || defined(__x86_64__) 5.88 HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; 5.89 clear_fixmap(FIX_SHARED_INFO); 5.90 5.91 xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); 5.92 xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn); 5.93 +#endif 5.94 5.95 /* 5.96 * We'll stop somewhere inside this hypercall. 
When it returns, 5.97 @@ -150,6 +161,7 @@ static int __do_suspend(void *ignore) 5.98 5.99 shutting_down = SHUTDOWN_INVALID; 5.100 5.101 +#if defined(__i386__) || defined(__x86_64__) 5.102 set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info); 5.103 5.104 HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO); 5.105 @@ -171,6 +183,7 @@ static int __do_suspend(void *ignore) 5.106 virt_to_mfn(&phys_to_machine_mapping[i]); 5.107 } 5.108 HYPERVISOR_shared_info->arch.max_pfn = max_pfn; 5.109 +#endif 5.110 5.111 gnttab_resume(); 5.112 5.113 @@ -178,9 +191,13 @@ static int __do_suspend(void *ignore) 5.114 5.115 time_resume(); 5.116 5.117 +#if defined(__i386__) || defined(__x86_64__) 5.118 switch_idle_mm(); 5.119 5.120 __sti(); 5.121 +#elif defined(__ia64__) 5.122 + local_irq_enable(); 5.123 +#endif 5.124 5.125 xencons_resume(); 5.126
6.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue Jul 11 11:29:25 2006 -0600 6.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue Jul 11 12:51:18 2006 -0600 6.3 @@ -302,23 +302,7 @@ HYPERVISOR_vcpu_op( 6.4 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); 6.5 } 6.6 6.7 -static inline int 6.8 -HYPERVISOR_suspend( 6.9 - unsigned long srec) 6.10 -{ 6.11 - struct sched_shutdown sched_shutdown = { 6.12 - .reason = SHUTDOWN_suspend 6.13 - }; 6.14 - 6.15 - int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, 6.16 - &sched_shutdown, srec); 6.17 - 6.18 - if (rc == -ENOSYS) 6.19 - rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, 6.20 - SHUTDOWN_suspend, srec); 6.21 - 6.22 - return rc; 6.23 -} 6.24 +extern int HYPERVISOR_suspend(unsigned long srec); 6.25 6.26 static inline int 6.27 HYPERVISOR_callback_op(
7.1 --- a/tools/libxc/ia64/Makefile Tue Jul 11 11:29:25 2006 -0600 7.2 +++ b/tools/libxc/ia64/Makefile Tue Jul 11 12:51:18 2006 -0600 7.3 @@ -1,3 +1,5 @@ 7.4 CTRL_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_stubs.c 7.5 7.6 GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_hvm_build.c 7.7 +GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_linux_save.c 7.8 +GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_linux_restore.c
8.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 8.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c Tue Jul 11 12:51:18 2006 -0600 8.3 @@ -0,0 +1,320 @@ 8.4 +/****************************************************************************** 8.5 + * xc_ia64_linux_restore.c 8.6 + * 8.7 + * Restore the state of a Linux session. 8.8 + * 8.9 + * Copyright (c) 2003, K A Fraser. 8.10 + * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net> 8.11 + */ 8.12 + 8.13 +#include <stdlib.h> 8.14 +#include <unistd.h> 8.15 + 8.16 +#include "xg_private.h" 8.17 + 8.18 +#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10)) 8.19 + 8.20 +/* total number of pages used by the current guest */ 8.21 +static unsigned long max_pfn; 8.22 + 8.23 +static ssize_t 8.24 +read_exact(int fd, void *buf, size_t count) 8.25 +{ 8.26 + int r = 0, s; 8.27 + unsigned char *b = buf; 8.28 + 8.29 + while (r < count) { 8.30 + s = read(fd, &b[r], count - r); 8.31 + if ((s == -1) && (errno == EINTR)) 8.32 + continue; 8.33 + if (s <= 0) { 8.34 + break; 8.35 + } 8.36 + r += s; 8.37 + } 8.38 + 8.39 + return (r == count) ? 
1 : 0; 8.40 +} 8.41 + 8.42 +static int 8.43 +read_page(int xc_handle, int io_fd, uint32_t dom, unsigned long pfn) 8.44 +{ 8.45 + void *mem; 8.46 + 8.47 + mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 8.48 + PROT_READ|PROT_WRITE, pfn); 8.49 + if (mem == NULL) { 8.50 + ERR("cannot map page"); 8.51 + return -1; 8.52 + } 8.53 + if (!read_exact(io_fd, mem, PAGE_SIZE)) { 8.54 + ERR("Error when reading from state file (5)"); 8.55 + return -1; 8.56 + } 8.57 + munmap(mem, PAGE_SIZE); 8.58 + return 0; 8.59 +} 8.60 + 8.61 +int 8.62 +xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, 8.63 + unsigned long nr_pfns, unsigned int store_evtchn, 8.64 + unsigned long *store_mfn, unsigned int console_evtchn, 8.65 + unsigned long *console_mfn) 8.66 +{ 8.67 + DECLARE_DOM0_OP; 8.68 + int rc = 1, i; 8.69 + unsigned long mfn, pfn; 8.70 + unsigned long ver; 8.71 + 8.72 + /* The new domain's shared-info frame number. */ 8.73 + unsigned long shared_info_frame; 8.74 + unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */ 8.75 + shared_info_t *shared_info = (shared_info_t *)shared_info_page; 8.76 + 8.77 + /* A copy of the CPU context of the guest. */ 8.78 + vcpu_guest_context_t ctxt; 8.79 + 8.80 + unsigned long *page_array = NULL; 8.81 + 8.82 + /* A temporary mapping of the guest's start_info page. */ 8.83 + start_info_t *start_info; 8.84 + 8.85 + max_pfn = nr_pfns; 8.86 + 8.87 + DPRINTF("xc_linux_restore start: max_pfn = %ld\n", max_pfn); 8.88 + 8.89 + 8.90 + if (!read_exact(io_fd, &ver, sizeof(unsigned long))) { 8.91 + ERR("Error when reading version"); 8.92 + goto out; 8.93 + } 8.94 + if (ver != 1) { 8.95 + ERR("version of save doesn't match"); 8.96 + goto out; 8.97 + } 8.98 + 8.99 + if (mlock(&ctxt, sizeof(ctxt))) { 8.100 + /* needed for build dom0 op, but might as well do early */ 8.101 + ERR("Unable to mlock ctxt"); 8.102 + return 1; 8.103 + } 8.104 + 8.105 + /* Get the domain's shared-info frame. 
*/ 8.106 + op.cmd = DOM0_GETDOMAININFO; 8.107 + op.u.getdomaininfo.domain = (domid_t)dom; 8.108 + if (xc_dom0_op(xc_handle, &op) < 0) { 8.109 + ERR("Could not get information on new domain"); 8.110 + goto out; 8.111 + } 8.112 + shared_info_frame = op.u.getdomaininfo.shared_info_frame; 8.113 + 8.114 + if (xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) { 8.115 + errno = ENOMEM; 8.116 + goto out; 8.117 + } 8.118 + 8.119 + if (xc_domain_memory_increase_reservation(xc_handle, dom, max_pfn, 8.120 + 0, 0, NULL) != 0) { 8.121 + ERR("Failed to increase reservation by %ld KB", PFN_TO_KB(max_pfn)); 8.122 + errno = ENOMEM; 8.123 + goto out; 8.124 + } 8.125 + 8.126 + DPRINTF("Increased domain reservation by %ld KB\n", PFN_TO_KB(max_pfn)); 8.127 + 8.128 + if (!read_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) { 8.129 + ERR("read: domain setup"); 8.130 + goto out; 8.131 + } 8.132 + 8.133 + /* Build firmware (will be overwritten). */ 8.134 + op.u.domain_setup.domain = (domid_t)dom; 8.135 + op.u.domain_setup.flags &= ~XEN_DOMAINSETUP_query; 8.136 + op.u.domain_setup.bp = ((nr_pfns - 3) << PAGE_SHIFT) 8.137 + + sizeof (start_info_t); 8.138 + op.u.domain_setup.maxmem = (nr_pfns - 3) << PAGE_SHIFT; 8.139 + 8.140 + op.cmd = DOM0_DOMAIN_SETUP; 8.141 + if (xc_dom0_op(xc_handle, &op)) 8.142 + goto out; 8.143 + 8.144 + /* Get pages. 
*/ 8.145 + page_array = malloc(max_pfn * sizeof(unsigned long)); 8.146 + if (page_array == NULL ) { 8.147 + ERR("Could not allocate memory"); 8.148 + goto out; 8.149 + } 8.150 + 8.151 + if (xc_ia64_get_pfn_list(xc_handle, dom, page_array, 8.152 + 0, max_pfn) != max_pfn) { 8.153 + ERR("Could not get the page frame list"); 8.154 + goto out; 8.155 + } 8.156 + 8.157 + DPRINTF("Reloading memory pages: 0%%\n"); 8.158 + 8.159 + while (1) { 8.160 + if (!read_exact(io_fd, &mfn, sizeof(unsigned long))) { 8.161 + ERR("Error when reading batch size"); 8.162 + goto out; 8.163 + } 8.164 + if (mfn == INVALID_MFN) 8.165 + break; 8.166 + 8.167 + pfn = page_array[mfn]; 8.168 + 8.169 + DPRINTF ("xc_linux_restore: page %lu/%lu at %lx\n", mfn, max_pfn, pfn); 8.170 + 8.171 + if (read_page(xc_handle, io_fd, dom, page_array[mfn]) < 0) 8.172 + goto out; 8.173 + } 8.174 + 8.175 + DPRINTF("Received all pages\n"); 8.176 + 8.177 + /* Get the list of PFNs that are not in the psuedo-phys map */ 8.178 + { 8.179 + unsigned int count; 8.180 + unsigned long *pfntab; 8.181 + int rc; 8.182 + 8.183 + if (!read_exact(io_fd, &count, sizeof(count))) { 8.184 + ERR("Error when reading pfn count"); 8.185 + goto out; 8.186 + } 8.187 + 8.188 + pfntab = malloc(sizeof(unsigned long) * count); 8.189 + if (!pfntab) { 8.190 + ERR("Out of memory"); 8.191 + goto out; 8.192 + } 8.193 + 8.194 + if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) { 8.195 + ERR("Error when reading pfntab"); 8.196 + goto out; 8.197 + } 8.198 + 8.199 + DPRINTF ("Try to free %u pages\n", count); 8.200 + 8.201 + for (i = 0; i < count; i++) { 8.202 + 8.203 + volatile unsigned long pfn; 8.204 + 8.205 + struct xen_memory_reservation reservation = { 8.206 + .nr_extents = 1, 8.207 + .extent_order = 0, 8.208 + .domid = dom 8.209 + }; 8.210 + set_xen_guest_handle(reservation.extent_start, 8.211 + (unsigned long *)&pfn); 8.212 + 8.213 + pfn = pfntab[i]; 8.214 + rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation, 8.215 + &reservation); 
8.216 + if (rc != 1) { 8.217 + ERR("Could not decrease reservation : %d", rc); 8.218 + goto out; 8.219 + } 8.220 + } 8.221 + 8.222 + DPRINTF("Decreased reservation by %d pages\n", count); 8.223 + } 8.224 + 8.225 + 8.226 + if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) { 8.227 + ERR("Error when reading ctxt"); 8.228 + goto out; 8.229 + } 8.230 + 8.231 + /* First to initialize. */ 8.232 + op.cmd = DOM0_SETVCPUCONTEXT; 8.233 + op.u.setvcpucontext.domain = (domid_t)dom; 8.234 + op.u.setvcpucontext.vcpu = 0; 8.235 + set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt); 8.236 + if (xc_dom0_op(xc_handle, &op) != 0) { 8.237 + ERR("Couldn't set vcpu context"); 8.238 + goto out; 8.239 + } 8.240 + 8.241 + /* Second to set registers... */ 8.242 + ctxt.flags = VGCF_EXTRA_REGS; 8.243 + op.cmd = DOM0_SETVCPUCONTEXT; 8.244 + op.u.setvcpucontext.domain = (domid_t)dom; 8.245 + op.u.setvcpucontext.vcpu = 0; 8.246 + set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt); 8.247 + if (xc_dom0_op(xc_handle, &op) != 0) { 8.248 + ERR("Couldn't set vcpu context"); 8.249 + goto out; 8.250 + } 8.251 + 8.252 + /* Just a check. */ 8.253 + if (xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, &ctxt)) { 8.254 + ERR("Could not get vcpu context"); 8.255 + goto out; 8.256 + } 8.257 + 8.258 + /* Then get privreg page. */ 8.259 + if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) { 8.260 + ERR("Could not read vcpu privregs"); 8.261 + goto out; 8.262 + } 8.263 + 8.264 + /* Read shared info. 
*/ 8.265 + shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 8.266 + PROT_READ|PROT_WRITE, shared_info_frame); 8.267 + if (shared_info == NULL) { 8.268 + ERR("cannot map page"); 8.269 + goto out; 8.270 + } 8.271 + if (!read_exact(io_fd, shared_info, PAGE_SIZE)) { 8.272 + ERR("Error when reading shared_info page"); 8.273 + goto out; 8.274 + } 8.275 + 8.276 + /* clear any pending events and the selector */ 8.277 + memset(&(shared_info->evtchn_pending[0]), 0, 8.278 + sizeof (shared_info->evtchn_pending)); 8.279 + for (i = 0; i < MAX_VIRT_CPUS; i++) 8.280 + shared_info->vcpu_info[i].evtchn_pending_sel = 0; 8.281 + 8.282 + mfn = page_array[shared_info->arch.start_info_pfn]; 8.283 + 8.284 + munmap (shared_info, PAGE_SIZE); 8.285 + 8.286 + /* Uncanonicalise the suspend-record frame number and poke resume rec. */ 8.287 + start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 8.288 + PROT_READ | PROT_WRITE, mfn); 8.289 + start_info->nr_pages = max_pfn; 8.290 + start_info->shared_info = shared_info_frame << PAGE_SHIFT; 8.291 + start_info->flags = 0; 8.292 + *store_mfn = page_array[start_info->store_mfn]; 8.293 + start_info->store_evtchn = store_evtchn; 8.294 + *console_mfn = page_array[start_info->console_mfn]; 8.295 + start_info->console_evtchn = console_evtchn; 8.296 + munmap(start_info, PAGE_SIZE); 8.297 + 8.298 + /* 8.299 + * Safety checking of saved context: 8.300 + * 1. user_regs is fine, as Xen checks that on context switch. 8.301 + * 2. fpu_ctxt is fine, as it can't hurt Xen. 8.302 + * 3. trap_ctxt needs the code selectors checked. 8.303 + * 4. ldt base must be page-aligned, no more than 8192 ents, ... 8.304 + * 5. gdt already done, and further checking is done by Xen. 8.305 + * 6. check that kernel_ss is safe. 8.306 + * 7. pt_base is already done. 8.307 + * 8. debugregs are checked by Xen. 8.308 + * 9. callback code selectors need checking. 
8.309 + */ 8.310 + DPRINTF("Domain ready to be built.\n"); 8.311 + 8.312 + rc = 0; 8.313 + 8.314 + out: 8.315 + if ((rc != 0) && (dom != 0)) 8.316 + xc_domain_destroy(xc_handle, dom); 8.317 + 8.318 + free (page_array); 8.319 + 8.320 + DPRINTF("Restore exit with rc=%d\n", rc); 8.321 + 8.322 + return rc; 8.323 +}
9.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 9.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c Tue Jul 11 12:51:18 2006 -0600 9.3 @@ -0,0 +1,295 @@ 9.4 +/****************************************************************************** 9.5 + * xc_ia64_linux_save.c 9.6 + * 9.7 + * Save the state of a running Linux session. 9.8 + * 9.9 + * Copyright (c) 2003, K A Fraser. 9.10 + * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net> 9.11 + */ 9.12 + 9.13 +#include <inttypes.h> 9.14 +#include <time.h> 9.15 +#include <stdlib.h> 9.16 +#include <unistd.h> 9.17 +#include <sys/time.h> 9.18 + 9.19 +#include "xg_private.h" 9.20 + 9.21 +/* total number of pages used by the current guest */ 9.22 +static unsigned long max_pfn; 9.23 + 9.24 +static inline ssize_t 9.25 +write_exact(int fd, void *buf, size_t count) 9.26 +{ 9.27 + if (write(fd, buf, count) != count) 9.28 + return 0; 9.29 + return 1; 9.30 +} 9.31 + 9.32 +static int 9.33 +suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd, 9.34 + int dom, xc_dominfo_t *info) 9.35 +{ 9.36 + int i = 0; 9.37 + 9.38 + if (!(*suspend)(dom)) { 9.39 + ERR("Suspend request failed"); 9.40 + return -1; 9.41 + } 9.42 + 9.43 +retry: 9.44 + 9.45 + if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) { 9.46 + ERR("Could not get domain info"); 9.47 + return -1; 9.48 + } 9.49 + 9.50 + if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend) 9.51 + return 0; // success 9.52 + 9.53 + if (info->paused) { 9.54 + // try unpausing domain, wait, and retest 9.55 + xc_domain_unpause(xc_handle, dom); 9.56 + 9.57 + ERR("Domain was paused. 
Wait and re-test."); 9.58 + usleep(10000); // 10ms 9.59 + 9.60 + goto retry; 9.61 + } 9.62 + 9.63 + 9.64 + if(++i < 100) { 9.65 + ERR("Retry suspend domain."); 9.66 + usleep(10000); // 10ms 9.67 + goto retry; 9.68 + } 9.69 + 9.70 + ERR("Unable to suspend domain."); 9.71 + 9.72 + return -1; 9.73 +} 9.74 + 9.75 +int 9.76 +xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 9.77 + uint32_t max_factor, uint32_t flags, int (*suspend)(int)) 9.78 +{ 9.79 + DECLARE_DOM0_OP; 9.80 + xc_dominfo_t info; 9.81 + 9.82 + int rc = 1; 9.83 + unsigned long N; 9.84 + 9.85 + //int live = (flags & XCFLAGS_LIVE); 9.86 + int debug = (flags & XCFLAGS_DEBUG); 9.87 + 9.88 + /* The new domain's shared-info frame number. */ 9.89 + unsigned long shared_info_frame; 9.90 + 9.91 + /* A copy of the CPU context of the guest. */ 9.92 + vcpu_guest_context_t ctxt; 9.93 + 9.94 + unsigned long *page_array = NULL; 9.95 + 9.96 + /* Live mapping of shared info structure */ 9.97 + shared_info_t *live_shinfo = NULL; 9.98 + 9.99 + char *mem; 9.100 + 9.101 + if (debug) 9.102 + fprintf (stderr, "xc_linux_save (ia64): started dom=%d\n", dom); 9.103 + 9.104 + if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) { 9.105 + ERR("Could not get domain info"); 9.106 + return 1; 9.107 + } 9.108 + 9.109 + shared_info_frame = info.shared_info_frame; 9.110 + 9.111 +#if 0 9.112 + /* cheesy sanity check */ 9.113 + if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) { 9.114 + ERR("Invalid state record -- pfn count out of range: %lu", 9.115 + (info.max_memkb >> (PAGE_SHIFT - 10))); 9.116 + goto out; 9.117 + } 9.118 +#endif 9.119 + 9.120 + /* Map the shared info frame */ 9.121 + live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 9.122 + PROT_READ, shared_info_frame); 9.123 + if (!live_shinfo) { 9.124 + ERR("Couldn't map live_shinfo"); 9.125 + goto out; 9.126 + } 9.127 + 9.128 + max_pfn = info.max_memkb >> (PAGE_SHIFT - 10); 9.129 + 9.130 + 9.131 + /* This is a non-live suspend. 
Issue the call back to get the 9.132 + domain suspended */ 9.133 + 9.134 + if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) { 9.135 + ERR("Domain appears not to have suspended"); 9.136 + goto out; 9.137 + } 9.138 + 9.139 + page_array = malloc(max_pfn * sizeof(unsigned long)); 9.140 + if (page_array == NULL) { 9.141 + ERR("Could not allocate memory"); 9.142 + goto out; 9.143 + } 9.144 + 9.145 + if (xc_ia64_get_pfn_list(xc_handle, dom, page_array, 9.146 + 0, max_pfn) != max_pfn) { 9.147 + ERR("Could not get the page frame list"); 9.148 + goto out; 9.149 + } 9.150 + 9.151 + /* This is expected by xm restore. */ 9.152 + if (!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) { 9.153 + ERR("write: max_pfn"); 9.154 + goto out; 9.155 + } 9.156 + 9.157 + /* xc_linux_restore starts to read here. */ 9.158 + /* Write a version number. This can avoid searching for a stupid bug 9.159 + if the format change. 9.160 + The version is hard-coded, don't forget to change the restore code 9.161 + too! */ 9.162 + N = 1; 9.163 + if (!write_exact(io_fd, &N, sizeof(unsigned long))) { 9.164 + ERR("write: version"); 9.165 + goto out; 9.166 + } 9.167 + 9.168 + op.cmd = DOM0_DOMAIN_SETUP; 9.169 + op.u.domain_setup.domain = (domid_t)dom; 9.170 + op.u.domain_setup.flags = XEN_DOMAINSETUP_query; 9.171 + if (xc_dom0_op(xc_handle, &op) < 0) { 9.172 + ERR("Could not get domain setup"); 9.173 + goto out; 9.174 + } 9.175 + op.u.domain_setup.domain = 0; 9.176 + if (!write_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) { 9.177 + ERR("write: domain setup"); 9.178 + goto out; 9.179 + } 9.180 + 9.181 + /* Start writing out the saved-domain record. 
*/ 9.182 + for (N = 0; N < max_pfn; N++) { 9.183 + if (page_array[N] == INVALID_MFN) 9.184 + continue; 9.185 + if (debug) 9.186 + fprintf (stderr, "xc_linux_save: page %lx (%lu/%lu)\n", 9.187 + page_array[N], N, max_pfn); 9.188 + 9.189 + if (!write_exact(io_fd, &N, sizeof(N))) { 9.190 + ERR("write: max_pfn"); 9.191 + goto out; 9.192 + } 9.193 + 9.194 + mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 9.195 + PROT_READ|PROT_WRITE, page_array[N]); 9.196 + if (mem == NULL) { 9.197 + ERR("cannot map page"); 9.198 + goto out; 9.199 + } 9.200 + if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) { 9.201 + ERR("Error when writing to state file (5)"); 9.202 + goto out; 9.203 + } 9.204 + munmap(mem, PAGE_SIZE); 9.205 + } 9.206 + 9.207 + fprintf (stderr, "All memory is saved\n"); 9.208 + 9.209 + /* terminate */ 9.210 + N = INVALID_MFN; 9.211 + if (!write_exact(io_fd, &N, sizeof(N))) { 9.212 + ERR("Error when writing to state file (6)"); 9.213 + goto out; 9.214 + } 9.215 + 9.216 + /* Send through a list of all the PFNs that were not in map at the close */ 9.217 + { 9.218 + unsigned int i,j; 9.219 + unsigned long pfntab[1024]; 9.220 + 9.221 + for (i = 0, j = 0; i < max_pfn; i++) { 9.222 + if (page_array[i] == INVALID_MFN) 9.223 + j++; 9.224 + } 9.225 + 9.226 + if (!write_exact(io_fd, &j, sizeof(unsigned int))) { 9.227 + ERR("Error when writing to state file (6a)"); 9.228 + goto out; 9.229 + } 9.230 + 9.231 + for (i = 0, j = 0; i < max_pfn; ) { 9.232 + 9.233 + if (page_array[i] == INVALID_MFN) 9.234 + pfntab[j++] = i; 9.235 + 9.236 + i++; 9.237 + if (j == 1024 || i == max_pfn) { 9.238 + if (!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) { 9.239 + ERR("Error when writing to state file (6b)"); 9.240 + goto out; 9.241 + } 9.242 + j = 0; 9.243 + } 9.244 + } 9.245 + 9.246 + } 9.247 + 9.248 + if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) { 9.249 + ERR("Could not get vcpu context"); 9.250 + goto out; 9.251 + } 9.252 + 9.253 + if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) { 
9.254 + ERR("Error when writing to state file (1)"); 9.255 + goto out; 9.256 + } 9.257 + 9.258 + mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 9.259 + PROT_READ|PROT_WRITE, ctxt.privregs_pfn); 9.260 + if (mem == NULL) { 9.261 + ERR("cannot map privreg page"); 9.262 + goto out; 9.263 + } 9.264 + if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) { 9.265 + ERR("Error when writing privreg to state file (5)"); 9.266 + goto out; 9.267 + } 9.268 + munmap(mem, PAGE_SIZE); 9.269 + 9.270 + if (!write_exact(io_fd, live_shinfo, PAGE_SIZE)) { 9.271 + ERR("Error when writing to state file (1)"); 9.272 + goto out; 9.273 + } 9.274 + 9.275 + /* Success! */ 9.276 + rc = 0; 9.277 + 9.278 + out: 9.279 + 9.280 + free (page_array); 9.281 + 9.282 + if (live_shinfo) 9.283 + munmap(live_shinfo, PAGE_SIZE); 9.284 + 9.285 + fprintf(stderr,"Save exit rc=%d\n",rc); 9.286 + 9.287 + return !!rc; 9.288 +} 9.289 + 9.290 +/* 9.291 + * Local variables: 9.292 + * mode: C 9.293 + * c-set-style: "BSD" 9.294 + * c-basic-offset: 4 9.295 + * tab-width: 4 9.296 + * indent-tabs-mode: nil 9.297 + * End: 9.298 + */
10.1 --- a/tools/libxc/ia64/xc_ia64_stubs.c Tue Jul 11 11:29:25 2006 -0600 10.2 +++ b/tools/libxc/ia64/xc_ia64_stubs.c Tue Jul 11 12:51:18 2006 -0600 10.3 @@ -23,24 +23,6 @@ xc_ia64_fpsr_default(void) 10.4 return FPSR_DEFAULT; 10.5 } 10.6 10.7 -int 10.8 -xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 10.9 - uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */, 10.10 - int (*suspend)(int domid)) 10.11 -{ 10.12 - PERROR("xc_linux_save not implemented\n"); 10.13 - return -1; 10.14 -} 10.15 - 10.16 -int 10.17 -xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, unsigned long nr_pfns, 10.18 - unsigned int store_evtchn, unsigned long *store_mfn, 10.19 - unsigned int console_evtchn, unsigned long *console_mfn) 10.20 -{ 10.21 - PERROR("xc_linux_restore not implemented\n"); 10.22 - return -1; 10.23 -} 10.24 - 10.25 /* 10.26 VMM uses put_user to copy pfn_list to guest buffer, this maybe fail, 10.27 VMM doesn't handle this now.
11.1 --- a/tools/libxc/xc_linux_build.c Tue Jul 11 11:29:25 2006 -0600 11.2 +++ b/tools/libxc/xc_linux_build.c Tue Jul 11 12:51:18 2006 -0600 11.3 @@ -498,6 +498,7 @@ static int setup_guest(int xc_handle, 11.4 start_info_mpa = (nr_pages - 3) << PAGE_SHIFT; 11.5 11.6 /* Build firmware. */ 11.7 + memset(&op.u.domain_setup, 0, sizeof(op.u.domain_setup)); 11.8 op.u.domain_setup.flags = 0; 11.9 op.u.domain_setup.domain = (domid_t)dom; 11.10 op.u.domain_setup.bp = start_info_mpa + sizeof (start_info_t);
12.1 --- a/xen/arch/ia64/xen/dom0_ops.c Tue Jul 11 11:29:25 2006 -0600 12.2 +++ b/xen/arch/ia64/xen/dom0_ops.c Tue Jul 11 12:51:18 2006 -0600 12.3 @@ -224,19 +224,43 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_ 12.4 break; 12.5 } 12.6 12.7 - if (ds->flags & XEN_DOMAINSETUP_hvm_guest) { 12.8 - if (!vmx_enabled) { 12.9 - printk("No VMX hardware feature for vmx domain.\n"); 12.10 - ret = -EINVAL; 12.11 - break; 12.12 - } 12.13 - d->arch.is_vti = 1; 12.14 - vmx_setup_platform(d); 12.15 + if (ds->flags & XEN_DOMAINSETUP_query) { 12.16 + /* Set flags. */ 12.17 + if (d->arch.is_vti) 12.18 + ds->flags |= XEN_DOMAINSETUP_hvm_guest; 12.19 + /* Set params. */ 12.20 + ds->bp = 0; /* unknown. */ 12.21 + ds->maxmem = 0; /* unknown. */ 12.22 + ds->xsi_va = d->arch.shared_info_va; 12.23 + ds->hypercall_imm = d->arch.breakimm; 12.24 + /* Copy back. */ 12.25 + if ( copy_to_guest(u_dom0_op, op, 1) ) 12.26 + ret = -EFAULT; 12.27 } 12.28 else { 12.29 - build_physmap_table(d); 12.30 - dom_fw_setup(d, ds->bp, ds->maxmem); 12.31 + if (ds->flags & XEN_DOMAINSETUP_hvm_guest) { 12.32 + if (!vmx_enabled) { 12.33 + printk("No VMX hardware feature for vmx domain.\n"); 12.34 + ret = -EINVAL; 12.35 + break; 12.36 + } 12.37 + d->arch.is_vti = 1; 12.38 + vmx_setup_platform(d); 12.39 + } 12.40 + else { 12.41 + build_physmap_table(d); 12.42 + dom_fw_setup(d, ds->bp, ds->maxmem); 12.43 + if (ds->xsi_va) 12.44 + d->arch.shared_info_va = ds->xsi_va; 12.45 + if (ds->hypercall_imm) { 12.46 + struct vcpu *v; 12.47 + d->arch.breakimm = ds->hypercall_imm; 12.48 + for_each_vcpu (d, v) 12.49 + v->arch.breakimm = d->arch.breakimm; 12.50 + } 12.51 + } 12.52 } 12.53 + 12.54 put_domain(d); 12.55 } 12.56 break;
13.1 --- a/xen/arch/ia64/xen/domain.c Tue Jul 11 11:29:25 2006 -0600 13.2 +++ b/xen/arch/ia64/xen/domain.c Tue Jul 11 12:51:18 2006 -0600 13.3 @@ -407,8 +407,28 @@ void arch_domain_destroy(struct domain * 13.4 13.5 void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c) 13.6 { 13.7 + int i; 13.8 + struct vcpu_extra_regs *er = &c->extra_regs; 13.9 + 13.10 c->user_regs = *vcpu_regs (v); 13.11 c->privregs_pfn = virt_to_maddr(v->arch.privregs) >> PAGE_SHIFT; 13.12 + 13.13 + /* Fill extra regs. */ 13.14 + for (i = 0; i < 8; i++) { 13.15 + er->itrs[i].pte = v->arch.itrs[i].pte.val; 13.16 + er->itrs[i].itir = v->arch.itrs[i].itir; 13.17 + er->itrs[i].vadr = v->arch.itrs[i].vadr; 13.18 + er->itrs[i].rid = v->arch.itrs[i].rid; 13.19 + } 13.20 + for (i = 0; i < 8; i++) { 13.21 + er->dtrs[i].pte = v->arch.dtrs[i].pte.val; 13.22 + er->dtrs[i].itir = v->arch.dtrs[i].itir; 13.23 + er->dtrs[i].vadr = v->arch.dtrs[i].vadr; 13.24 + er->dtrs[i].rid = v->arch.dtrs[i].rid; 13.25 + } 13.26 + er->event_callback_ip = v->arch.event_callback_ip; 13.27 + er->dcr = v->arch.dcr; 13.28 + er->iva = v->arch.iva; 13.29 } 13.30 13.31 int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c) 13.32 @@ -423,6 +443,28 @@ int arch_set_info_guest(struct vcpu *v, 13.33 regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; 13.34 regs->ar_rsc |= (2 << 2); /* force PL2/3 */ 13.35 } 13.36 + 13.37 + if (c->flags & VGCF_EXTRA_REGS) { 13.38 + int i; 13.39 + struct vcpu_extra_regs *er = &c->extra_regs; 13.40 + 13.41 + for (i = 0; i < 8; i++) { 13.42 + vcpu_set_itr(v, i, er->itrs[i].pte, 13.43 + er->itrs[i].itir, 13.44 + er->itrs[i].vadr, 13.45 + er->itrs[i].rid); 13.46 + } 13.47 + for (i = 0; i < 8; i++) { 13.48 + vcpu_set_dtr(v, i, 13.49 + er->dtrs[i].pte, 13.50 + er->dtrs[i].itir, 13.51 + er->dtrs[i].vadr, 13.52 + er->dtrs[i].rid); 13.53 + } 13.54 + v->arch.event_callback_ip = er->event_callback_ip; 13.55 + v->arch.dcr = er->dcr; 13.56 + v->arch.iva = er->iva; 13.57 + } 13.58 13.59 if ( 
test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 13.60 return 0;
14.1 --- a/xen/arch/ia64/xen/vcpu.c Tue Jul 11 11:29:25 2006 -0600 14.2 +++ b/xen/arch/ia64/xen/vcpu.c Tue Jul 11 12:51:18 2006 -0600 14.3 @@ -1880,13 +1880,15 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6 14.4 VCPU translation register access routines 14.5 **************************************************************************/ 14.6 14.7 -static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa) 14.8 +static void 14.9 +vcpu_set_tr_entry_rid(TR_ENTRY *trp, UINT64 pte, 14.10 + UINT64 itir, UINT64 ifa, UINT64 rid) 14.11 { 14.12 UINT64 ps; 14.13 union pte_flags new_pte; 14.14 14.15 trp->itir = itir; 14.16 - trp->rid = VCPU(current,rrs[ifa>>61]) & RR_RID_MASK; 14.17 + trp->rid = rid; 14.18 ps = trp->ps; 14.19 new_pte.val = pte; 14.20 if (new_pte.pl < 2) new_pte.pl = 2; 14.21 @@ -1900,8 +1902,15 @@ static void vcpu_set_tr_entry(TR_ENTRY * 14.22 trp->pte.val = new_pte.val; 14.23 } 14.24 14.25 +static inline void 14.26 +vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa) 14.27 +{ 14.28 + vcpu_set_tr_entry_rid(trp, pte, itir, ifa, 14.29 + VCPU(current, rrs[ifa>>61]) & RR_RID_MASK); 14.30 +} 14.31 + 14.32 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, 14.33 - UINT64 itir, UINT64 ifa) 14.34 + UINT64 itir, UINT64 ifa) 14.35 { 14.36 TR_ENTRY *trp; 14.37 14.38 @@ -1920,7 +1929,7 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 14.39 } 14.40 14.41 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, 14.42 - UINT64 itir, UINT64 ifa) 14.43 + UINT64 itir, UINT64 ifa) 14.44 { 14.45 TR_ENTRY *trp; 14.46 14.47 @@ -1938,6 +1947,44 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 14.48 return IA64_NO_FAULT; 14.49 } 14.50 14.51 +IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot, u64 pte, 14.52 + u64 itir, u64 ifa, u64 rid) 14.53 +{ 14.54 + TR_ENTRY *trp; 14.55 + 14.56 + if (slot >= NITRS) 14.57 + return IA64_RSVDREG_FAULT; 14.58 + trp = &PSCBX(vcpu, itrs[slot]); 14.59 + vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid); 14.60 + 14.61 + /* Recompute 
the itr_region. */ 14.62 + vcpu->arch.itr_regions = 0; 14.63 + for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++) 14.64 + if (trp->pte.p) 14.65 + vcpu_quick_region_set(vcpu->arch.itr_regions, 14.66 + trp->vadr); 14.67 + return IA64_NO_FAULT; 14.68 +} 14.69 + 14.70 +IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot, u64 pte, 14.71 + u64 itir, u64 ifa, u64 rid) 14.72 +{ 14.73 + TR_ENTRY *trp; 14.74 + 14.75 + if (slot >= NDTRS) 14.76 + return IA64_RSVDREG_FAULT; 14.77 + trp = &PSCBX(vcpu, dtrs[slot]); 14.78 + vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid); 14.79 + 14.80 + /* Recompute the dtr_region. */ 14.81 + vcpu->arch.dtr_regions = 0; 14.82 + for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++) 14.83 + if (trp->pte.p) 14.84 + vcpu_quick_region_set(vcpu->arch.dtr_regions, 14.85 + trp->vadr); 14.86 + return IA64_NO_FAULT; 14.87 +} 14.88 + 14.89 /************************************************************************** 14.90 VCPU translation cache access routines 14.91 **************************************************************************/ 14.92 @@ -2159,7 +2206,6 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v 14.93 vcpu_quick_region_set(vcpu->arch.itr_regions, 14.94 trp->vadr); 14.95 14.96 - 14.97 vcpu_flush_tlb_vhpt_range (vadr, log_range); 14.98 14.99 return IA64_NO_FAULT;
15.1 --- a/xen/include/asm-ia64/vcpu.h Tue Jul 11 11:29:25 2006 -0600 15.2 +++ b/xen/include/asm-ia64/vcpu.h Tue Jul 11 12:51:18 2006 -0600 15.3 @@ -164,6 +164,11 @@ extern void vcpu_itc_no_srlz(VCPU *vcpu, 15.4 extern UINT64 vcpu_get_tmp(VCPU *, UINT64); 15.5 extern void vcpu_set_tmp(VCPU *, UINT64, UINT64); 15.6 15.7 +extern IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot, 15.8 + u64 pte, u64 itir, u64 ifa, u64 rid); 15.9 +extern IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot, 15.10 + u64 pte, u64 itir, u64 ifa, u64 rid); 15.11 + 15.12 /* Initialize vcpu regs. */ 15.13 extern void vcpu_init_regs (struct vcpu *v); 15.14
16.1 --- a/xen/include/public/arch-ia64.h Tue Jul 11 11:29:25 2006 -0600 16.2 +++ b/xen/include/public/arch-ia64.h Tue Jul 11 12:51:18 2006 -0600 16.3 @@ -302,10 +302,27 @@ typedef struct arch_shared_info arch_sha 16.4 16.5 typedef unsigned long xen_callback_t; 16.6 16.7 +struct ia64_tr_entry { 16.8 + unsigned long pte; 16.9 + unsigned long itir; 16.10 + unsigned long vadr; 16.11 + unsigned long rid; 16.12 +}; 16.13 + 16.14 +struct vcpu_extra_regs { 16.15 + struct ia64_tr_entry itrs[8]; 16.16 + struct ia64_tr_entry dtrs[8]; 16.17 + unsigned long iva; 16.18 + unsigned long dcr; 16.19 + unsigned long event_callback_ip; 16.20 +}; 16.21 + 16.22 struct vcpu_guest_context { 16.23 +#define VGCF_EXTRA_REGS (1<<1) /* Get/Set extra regs. */ 16.24 unsigned long flags; /* VGCF_* flags */ 16.25 16.26 struct cpu_user_regs user_regs; 16.27 + struct vcpu_extra_regs extra_regs; 16.28 unsigned long privregs_pfn; 16.29 }; 16.30 typedef struct vcpu_guest_context vcpu_guest_context_t;
17.1 --- a/xen/include/public/dom0_ops.h Tue Jul 11 11:29:25 2006 -0600 17.2 +++ b/xen/include/public/dom0_ops.h Tue Jul 11 12:51:18 2006 -0600 17.3 @@ -518,12 +518,16 @@ DEFINE_XEN_GUEST_HANDLE(dom0_hypercall_i 17.4 #define DOM0_DOMAIN_SETUP 49 17.5 #define _XEN_DOMAINSETUP_hvm_guest 0 17.6 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) 17.7 +#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ 17.8 +#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) 17.9 typedef struct dom0_domain_setup { 17.10 domid_t domain; /* domain to be affected */ 17.11 unsigned long flags; /* XEN_DOMAINSETUP_* */ 17.12 #ifdef __ia64__ 17.13 unsigned long bp; /* mpaddr of boot param area */ 17.14 unsigned long maxmem; /* Highest memory address for MDT. */ 17.15 + unsigned long xsi_va; /* Xen shared_info area virtual address. */ 17.16 + unsigned int hypercall_imm; /* Break imm for Xen hypercalls. */ 17.17 #endif 17.18 } dom0_domain_setup_t; 17.19 DEFINE_XEN_GUEST_HANDLE(dom0_domain_setup_t);