xen/arch/powerpc/domain.c @ 11379:215d5eae720c (direct-io.hg)

[XEN][POWERPC] restructure RMA code to allow dom0 tools to allocate in the future
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

Author:   Hollis Blanchard <hollisb@us.ibm.com>
Date:     Fri Aug 25 14:48:07 2006 -0500 (2006-08-25)
Parents:  43ec7afa5734
Children: 6bd1a39dbfc8

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <stdarg.h>
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/serial.h>
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
#include <asm/htab.h>
#include <asm/current.h>
#include <asm/hcalls.h>

extern void idle_loop(void);

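/* Decode one conversion character from 'fmt' and fetch the matching
 * variadic argument, widened to unsigned long: 'i' for unsigned int,
 * 'l' for unsigned long, 'p' and 'h' for pointers/guest handles. */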
#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'p': __arg = (unsigned long)va_arg(args, void *);        break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})

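/* Rewind the guest PC over the 'sc' instruction and reload the hypercall
 * argument registers (r4 upward) so that the same hypercall is re-issued
 * the next time the guest runs. */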
unsigned long hypercall_create_continuation(unsigned int op,
                                            const char *format, ...)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const char *p = format;
    va_list args;
    int gprnum = 4;
    int i;

    va_start(args, format);

    regs->pc -= 4; /* re-execute 'sc' */

    for (i = 0; *p != '\0'; i++) {
        regs->gprs[gprnum++] = next_arg(p, args);
    }

    va_end(args);

    /* As luck would have it, we use the same register for hcall opcodes and
     * for hcall return values. The return value from this function is placed
     * in r3 on return, so modifying regs->gprs[3] would have no effect. */
    return XEN_MARK(op);
}

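/* Architecture-specific domain setup: the idle domain gets only a shared
 * info page; real domains also get a real mode area (RMA), their large
 * page size information, and a hash page table (HTAB) sized as a fraction
 * of the RMA. */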
int arch_domain_create(struct domain *d)
{
    unsigned long rma_base;
    unsigned long rma_sz;
    uint htab_order;
    uint nr_pages;

    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        clear_page(d->shared_info);

        return 0;
    }

    d->arch.rma_order = cpu_default_rma_order_pages();
    rma_sz = rma_size(d->arch.rma_order);

    /* allocate the real mode area */
    nr_pages = 1UL << d->arch.rma_order;
    d->max_pages = nr_pages;
    d->tot_pages = 0;
    d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
    if (NULL == d->arch.rma_page)
        return 1;

    rma_base = page_to_maddr(d->arch.rma_page);

    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */

    printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
    memset((void *)rma_base, 0, rma_sz);

    d->shared_info = (shared_info_t *)
        (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);

    d->arch.large_page_sizes = cpu_large_page_orders(
        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

    /* FIXME: we need to know the maximum addressable memory for this
     * domain to calculate this correctly. It should probably be set
     * by the management tools. */
    htab_order = d->arch.rma_order - 6; /* (1/64) */
    if (test_bit(_DOMF_privileged, &d->domain_flags)) {
        /* bump the htab size of privileged domains */
        ++htab_order;
    }
    htab_alloc(d, htab_order);

    return 0;
}

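/* Undo arch_domain_create(): release the domain's hash page table. The RMA
 * itself is returned separately, in domain_relinquish_resources() below. */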
void arch_domain_destroy(struct domain *d)
{
    htab_free(d);
}

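/* Neither halt nor restart is actually implemented yet: both switch the
 * console to synchronous mode and spin forever. */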
void machine_halt(void)
{
    printf("machine_halt called: spinning....\n");
    console_start_sync();
    while(1);
}

void machine_restart(char * __unused)
{
    printf("machine_restart called: spinning....\n");
    console_start_sync();
    while(1);
}

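/* Allocate and zero a vcpu structure. Only vcpu_id is filled in here; the
 * register state arrives later via arch_set_info_guest(). */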
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
    struct vcpu *v;

    if ( (v = xmalloc(struct vcpu)) == NULL )
        return NULL;

    memset(v, 0, sizeof(*v));
    v->vcpu_id = vcpu_id;

    return v;
}

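/* Unlink the vcpu from its predecessor's next_in_list chain before freeing
 * it; the domain's vcpus are linked in ascending vcpu_id order. */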
void free_vcpu_struct(struct vcpu *v)
{
    BUG_ON(v->next_in_list != NULL);
    if ( v->vcpu_id != 0 )
        v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
    xfree(v);
}

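/* Load the initial register state supplied by the domain builder into the
 * vcpu and mark it initialised. */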
int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
{
    memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));

    set_bit(_VCPUF_initialised, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}

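/* Debug helper: list the pages owned by a domain, skipping the DomPage list
 * entirely once the domain owns ten or more pages. */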
void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk("    DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk("    DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk("    XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

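/* Switch from 'prev' to 'next': spill prev's guest registers from the stack
 * into its vcpu, load next's onto the stack, swap SPR, floating point and
 * segment state, point SDR1 at next's domain hash table, and flush the TLB
 * before resuming the guest (or entering the idle loop). */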
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printf("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */

    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* XXX for now, for domain destruction, make this non-fatal */
    printf("%s: called\n", __func__);
}

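/* Return the domain's real mode area, allocated in arch_domain_create(),
 * to the domain heap. */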
void domain_relinquish_resources(struct domain *d)
{
    free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
}

void arch_dump_domain_info(struct domain *d)
{
}