ia64/xen-unstable

view xen/arch/ia64/xen/dom0_ops.c @ 10786:86e5d8458c08

[IA64] live migration

Shadow mode and live migration.

Virtualize Dirty bit.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jul 26 09:36:36 2006 -0600 (2006-07-26)
parents 306d7857928c
children 7be1cfe8345b
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/pdb.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <xen/guest_access.h>
20 #include <public/sched_ctl.h>
21 #include <asm/vmx.h>
22 #include <asm/dom_fw.h>
24 void build_physmap_table(struct domain *d);
26 extern unsigned long total_pages;
27 long arch_do_dom0_op(dom0_op_t *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
28 {
29 long ret = 0;
31 if ( !IS_PRIV(current->domain) )
32 return -EPERM;
34 switch ( op->cmd )
35 {
36 case DOM0_GETPAGEFRAMEINFO:
37 {
38 struct page_info *page;
39 unsigned long mfn = op->u.getpageframeinfo.gmfn;
40 domid_t dom = op->u.getpageframeinfo.domain;
41 struct domain *d;
43 ret = -EINVAL;
45 if ( unlikely(!mfn_valid(mfn)) ||
46 unlikely((d = find_domain_by_id(dom)) == NULL) )
47 break;
49 page = mfn_to_page(mfn);
51 if ( likely(get_page(page, d)) )
52 {
53 ret = 0;
55 op->u.getpageframeinfo.type = NOTAB;
57 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
58 {
59 switch ( page->u.inuse.type_info & PGT_type_mask )
60 {
61 default:
62 panic("No such page type\n");
63 break;
64 }
65 }
67 put_page(page);
68 }
70 put_domain(d);
72 copy_to_guest(u_dom0_op, op, 1);
73 }
74 break;
76 case DOM0_GETPAGEFRAMEINFO2:
77 {
78 #define GPF2_BATCH 128
79 int n,j;
80 int num = op->u.getpageframeinfo2.num;
81 domid_t dom = op->u.getpageframeinfo2.domain;
82 struct domain *d;
83 unsigned long *l_arr;
84 ret = -ESRCH;
86 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
87 break;
89 if ( unlikely(num > 1024) )
90 {
91 ret = -E2BIG;
92 break;
93 }
95 l_arr = (unsigned long *)alloc_xenheap_page();
97 ret = 0;
98 for( n = 0; n < num; )
99 {
100 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
102 if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
103 n, k) )
104 {
105 ret = -EINVAL;
106 break;
107 }
109 for( j = 0; j < k; j++ )
110 {
111 struct page_info *page;
112 unsigned long mfn = l_arr[j];
114 if ( unlikely(mfn >= max_page) )
115 goto e2_err;
117 page = mfn_to_page(mfn);
119 if ( likely(get_page(page, d)) )
120 {
121 unsigned long type = 0;
123 switch( page->u.inuse.type_info & PGT_type_mask )
124 {
125 default:
126 panic("No such page type\n");
127 break;
128 }
130 if ( page->u.inuse.type_info & PGT_pinned )
131 type |= LPINTAB;
132 l_arr[j] |= type;
133 put_page(page);
134 }
135 else
136 {
137 e2_err:
138 l_arr[j] |= XTAB;
139 }
141 }
143 if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
144 n, l_arr, k) )
145 {
146 ret = -EINVAL;
147 break;
148 }
150 n += j;
151 }
153 free_xenheap_page((void *) l_arr);
155 put_domain(d);
156 }
157 break;
159 case DOM0_GETMEMLIST:
160 {
161 unsigned long i;
162 struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
163 unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
164 unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
165 unsigned long mfn;
167 if ( d == NULL ) {
168 ret = -EINVAL;
169 break;
170 }
171 for (i = 0 ; i < nr_pages ; i++) {
172 pte_t *pte;
174 pte = (pte_t *)lookup_noalloc_domain_pte(d,
175 (start_page + i) << PAGE_SHIFT);
176 if (pte && pte_present(*pte))
177 mfn = pte_pfn(*pte);
178 else
179 mfn = INVALID_MFN;
181 if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
182 ret = -EFAULT;
183 break;
184 }
185 }
187 op->u.getmemlist.num_pfns = i;
188 if (copy_to_guest(u_dom0_op, op, 1))
189 ret = -EFAULT;
191 put_domain(d);
192 }
193 break;
195 case DOM0_PHYSINFO:
196 {
197 dom0_physinfo_t *pi = &op->u.physinfo;
199 pi->threads_per_core =
200 cpus_weight(cpu_sibling_map[0]);
201 pi->cores_per_socket =
202 cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
203 pi->sockets_per_node =
204 num_online_cpus() / cpus_weight(cpu_core_map[0]);
205 pi->nr_nodes = 1;
206 pi->total_pages = total_pages;
207 pi->free_pages = avail_domheap_pages();
208 pi->cpu_khz = local_cpu_data->proc_freq / 1000;
209 memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
210 //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
211 ret = 0;
212 if ( copy_to_guest(u_dom0_op, op, 1) )
213 ret = -EFAULT;
214 }
215 break;
217 case DOM0_DOMAIN_SETUP:
218 {
219 dom0_domain_setup_t *ds = &op->u.domain_setup;
220 struct domain *d = find_domain_by_id(ds->domain);
222 if ( d == NULL) {
223 ret = -EINVAL;
224 break;
225 }
227 if (ds->flags & XEN_DOMAINSETUP_query) {
228 /* Set flags. */
229 if (d->arch.is_vti)
230 ds->flags |= XEN_DOMAINSETUP_hvm_guest;
231 /* Set params. */
232 ds->bp = 0; /* unknown. */
233 ds->maxmem = 0; /* unknown. */
234 ds->xsi_va = d->arch.shared_info_va;
235 ds->hypercall_imm = d->arch.breakimm;
236 /* Copy back. */
237 if ( copy_to_guest(u_dom0_op, op, 1) )
238 ret = -EFAULT;
239 }
240 else {
241 if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
242 if (!vmx_enabled) {
243 printk("No VMX hardware feature for vmx domain.\n");
244 ret = -EINVAL;
245 break;
246 }
247 d->arch.is_vti = 1;
248 vmx_setup_platform(d);
249 }
250 else {
251 build_physmap_table(d);
252 dom_fw_setup(d, ds->bp, ds->maxmem);
253 if (ds->xsi_va)
254 d->arch.shared_info_va = ds->xsi_va;
255 if (ds->hypercall_imm) {
256 struct vcpu *v;
257 d->arch.breakimm = ds->hypercall_imm;
258 for_each_vcpu (d, v)
259 v->arch.breakimm = d->arch.breakimm;
260 }
261 }
262 }
264 put_domain(d);
265 }
266 break;
268 case DOM0_SHADOW_CONTROL:
269 {
270 struct domain *d;
271 ret = -ESRCH;
272 d = find_domain_by_id(op->u.shadow_control.domain);
273 if ( d != NULL )
274 {
275 ret = shadow_mode_control(d, &op->u.shadow_control);
276 put_domain(d);
277 copy_to_guest(u_dom0_op, op, 1);
278 }
279 }
280 break;
282 default:
283 printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n",op->cmd);
284 ret = -ENOSYS;
286 }
288 return ret;
289 }
#ifdef CONFIG_XEN_IA64_DOM0_VP
/*
 * Dispatch a dom0 virtual-physical (VP) hypercall on behalf of the
 * calling domain.  Each sub-command translates or manipulates the
 * caller's physical-to-machine mappings.
 *
 * Returns a command-specific value (translated frame number, status
 * code, etc.); unknown commands yield (unsigned long)-1.
 */
unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    struct domain *d = current->domain;
    unsigned long ret = 0;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        /* Map a machine MMIO range into the caller's physmap. */
        ret = assign_domain_mmio_page(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        /* Guest pfn -> machine frame number. */
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret != INVALID_MFN) {
            /* Extract the frame number from the pte-style value. */
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX: use pte_pfn() */
        } else {
            DPRINTK("%s:%d INVALID_MFN ret: 0x%lx\n", __func__, __LINE__, ret);
        }
        break;
    case IA64_DOM0VP_machtophys:
        /* Machine frame number -> guest pfn via the M2P table. */
        ret = mfn_valid(arg0) ? get_gpfn_from_mfn(arg0) : INVALID_M2P_ENTRY;
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    default:
        printf("unknown dom0_vp_op 0x%lx\n", cmd);
        ret = -1;
        break;
    }

    return ret;
}
#endif
336 /*
337 * Local variables:
338 * mode: C
339 * c-set-style: "BSD"
340 * c-basic-offset: 4
341 * tab-width: 4
342 * indent-tabs-mode: nil
343 * End:
344 */