direct-io.hg: xen/arch/powerpc/domain.c @ 11487:4fdf5151b187

[POWERPC] merge with xen-unstable.hg
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author:   hollisb@localhost
date:     Mon Sep 18 12:48:56 2006 -0500
parents:  e07281779b88
children: 0e02ac68b01b

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <stdarg.h>
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/serial.h>
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
#include <xen/shadow.h>
#include <asm/htab.h>
#include <asm/current.h>
#include <asm/hcalls.h>

extern void idle_loop(void);

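/*
 * Pull the next hypercall argument off the va_list according to the
 * format character: 'i' = unsigned int, 'l' = unsigned long,
 * 'p'/'h' = pointer (guest handle).  Any other character is a bug.
 */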
#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'p': __arg = (unsigned long)va_arg(args, void *);        break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})

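/*
 * Arrange for the current hypercall to be restarted: back the guest's
 * program counter up over the 'sc' instruction so it is re-executed,
 * and repack the (possibly updated) arguments into the argument
 * registers starting at r4.  The returned opcode lands in r3, which
 * doubles as the hcall number on re-entry.
 */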
unsigned long hypercall_create_continuation(unsigned int op,
                                            const char *format, ...)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const char *p = format;
    va_list args;
    int gprnum = 4;
    int i;

    va_start(args, format);

    regs->pc -= 4; /* re-execute 'sc' */

    for (i = 0; *p != '\0'; i++) {
        regs->gprs[gprnum++] = next_arg(p, args);
    }

    va_end(args);

    /* As luck would have it, we use the same register for hcall opcodes and
     * for hcall return values. The return value from this function is placed
     * in r3 on return, so modifying regs->gprs[3] would have no effect. */
    return XEN_MARK(op);
}

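/*
 * The idle domain only needs a zeroed shared_info page; real domains
 * additionally record which large page sizes the CPU supports and start
 * with an empty list of memory extents.
 */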
int arch_domain_create(struct domain *d)
{
    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        clear_page(d->shared_info);

        return 0;
    }

    d->arch.large_page_sizes = cpu_large_page_orders(
        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

    INIT_LIST_HEAD(&d->arch.extent_list);

    return 0;
}

void arch_domain_destroy(struct domain *d)
{
    shadow_teardown(d);
}

void machine_halt(void)
{
    printf("machine_halt called: spinning....\n");
    console_start_sync();
    while(1);
}

void machine_restart(char * __unused)
{
    printf("machine_restart called: spinning....\n");
    console_start_sync();
    while(1);
}

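/* Allocate and zero the per-vcpu structure for the given vcpu id. */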
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
    struct vcpu *v;

    if ( (v = xmalloc(struct vcpu)) == NULL )
        return NULL;

    memset(v, 0, sizeof(*v));
    v->vcpu_id = vcpu_id;

    return v;
}

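/*
 * Free a vcpu structure.  The vcpu must already be the tail of the
 * domain's vcpu list; clear the predecessor's next_in_list pointer
 * (vcpu 0 has no predecessor) before releasing the memory.
 */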
void free_vcpu_struct(struct vcpu *v)
{
    BUG_ON(v->next_in_list != NULL);
    if ( v->vcpu_id != 0 )
        v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
    xfree(v);
}

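/*
 * Load the builder-supplied register state into the vcpu.  The domain's
 * hash page table and RMA must already have been allocated, since the
 * vcpu cannot run without them; once the state is in place the vcpu is
 * marked initialised and given its per-CPU setup.
 */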
int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
{
    memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));

    printf("Domain[%d].%d: initializing\n",
           v->domain->domain_id, v->vcpu_id);

    if (v->domain->arch.htab.order == 0)
        panic("Page table never allocated for Domain: %d\n",
              v->domain->domain_id);
    if (v->domain->arch.rma_order == 0)
        panic("RMA never allocated for Domain: %d\n",
              v->domain->domain_id);

    set_bit(_VCPUF_initialised, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}

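/* Dump the domain's page lists for debugging; long DomPage lists are elided. */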
void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk(" DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk(" DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk(" XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

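/*
 * Switch from 'prev' to 'next': flush any TLB state the incoming vcpu
 * left dirty on another CPU, swap the guest register frame held on the
 * hypervisor stack, save and restore SPRs, floating-point and segment
 * state, point SDR1 at the new domain's hash table, and finally jump
 * either to the idle loop or back into the guest via full_resume.
 */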
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printf("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */

    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}

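/*
 * Both continue_running() and sync_vcpu_execstate() are no-ops here:
 * context_switch() above saves the full register state eagerly, so
 * there is no lazily-held state to synchronise.
 */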
void continue_running(struct vcpu *same)
{
    /* nothing to do */
    return;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* do nothing */
    return;
}

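/*
 * Return the domain's RMA pages and any extra memory extents to the
 * domain heap when the domain is torn down.
 */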
void domain_relinquish_resources(struct domain *d)
{
    if (d->arch.rma_page)
        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
    free_extents(d);
}

void arch_dump_domain_info(struct domain *d)
{
}