ia64/xen-unstable

xen/arch/ia64/xen/dom0_ops.c @ 13122:0f571adbd700

[IA64] setup scrub_pages

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author   awilliam@xenbuild2.aw
date     Thu Jan 04 16:25:14 2007 -0700
parents  1cfd862e5254
children 7d8670a30445
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>

#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
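
/*
 * Note: unlike the checked copy_{to,from}_guest() accessors, this macro
 * merely extracts the raw pointer embedded in a guest handle; it performs
 * no validation, so the result must still be treated as an untrusted,
 * guest-supplied address.
 */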

extern unsigned long total_pages;

long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = find_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        unsigned long mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }
        for (i = 0; i < nr_pages; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                        (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = start_page + i;
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_arch_setup:
    {
        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
        struct domain *d = find_domain_by_id(op->domain);

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags. */
            if (d->arch.is_vti)
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params. */
            ds->bp = 0;     /* unknown. */
            ds->maxmem = 0; /* unknown. */
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
            /* Copy back. */
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
                if (!vmx_enabled) {
                    printk("VMX hardware feature is not available; "
                           "cannot create a VMX domain.\n");
                    ret = -EINVAL;
                    /* Drop the domain reference taken above; the early
                     * break would otherwise leak it. */
                    put_domain(d);
                    break;
                }
                if (!d->arch.is_vti) {
                    struct vcpu *v;
                    for_each_vcpu(d, v) {
                        BUG_ON(v->arch.privregs == NULL);
                        free_domheap_pages(virt_to_page(v->arch.privregs),
                                      get_order_from_shift(XMAPPEDREGS_SHIFT));
                        v->arch.privregs = NULL;
                        relinquish_vcpu_resources(v);
                    }
                }
                d->arch.is_vti = 1;
                vmx_setup_platform(d);
            }
            else {
                dom_fw_setup(d, ds->bp, ds->maxmem);
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                if (ds->hypercall_imm) {
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
                {
                    /*
                     * XXX IA64_SHARED_INFO_PADDR
                     * Assign these pages into the guest's pseudo-physical
                     * address space so that dom0 can map them by gmfn.
                     * This is necessary for domain build, save, restore
                     * and dump-core.
                     */
                    unsigned long i;
                    for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
                        assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
                                           virt_to_maddr(d->shared_info + i));
                }
            }
        }

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_op);
            put_domain(d);
            copy_to_guest(u_domctl, op, 1);
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int lp = fp + np - 1;

        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if (unlikely(d == NULL))
            break;

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, lp);
            else
                ret = ioports_deny_access(d, fp, lp);
        }

        put_domain(d);
    }
    break;
    default:
        printk("arch_do_domctl: unrecognized domctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;

    }

    return ret;
}
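
/*
 * Illustrative sketch only (not part of this file): how a dom0 toolstack
 * might drive XEN_DOMCTL_getmemlist from user space.  example_do_domctl()
 * is a hypothetical stand-in for the real privcmd/libxc plumbing; the
 * field usage mirrors the handler above.
 */
#if 0
static long example_get_pfn_list(domid_t domid, unsigned long *buf,
                                 unsigned long start_pfn,
                                 unsigned long max_pfns)
{
    xen_domctl_t domctl;

    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = domid;
    domctl.u.getmemlist.start_pfn = start_pfn;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, buf);

    if (example_do_domctl(&domctl))     /* hypothetical hypercall wrapper */
        return -1;

    /* Holes in the physmap come back as INVALID_MFN entries in buf. */
    return domctl.u.getmemlist.num_pfns;
}
#endif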

/*
 * Temporarily disable the NUMA PHYSINFO code until the rest of the
 * changes are upstream.
 */
#undef IA64_NUMA_PHYSINFO

long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_SYSCTL_physinfo:
    {
#ifdef IA64_NUMA_PHYSINFO
        int i;
        node_data_t *chunks;
        u64 *map, cpu_to_node_map[MAX_NUMNODES];
#endif

        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core =
            cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->sockets_per_node =
            num_online_cpus() / cpus_weight(cpu_core_map[0]);
#ifndef IA64_NUMA_PHYSINFO
        pi->nr_nodes = 1;
#endif
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->scrub_pages = avail_scrub_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
        ret = 0;

#ifdef IA64_NUMA_PHYSINFO
        /* fetch memory_chunk pointer from guest */
        get_xen_guest_handle(chunks, pi->memory_chunks);

        printk("chunks=%p, num_node_memblks=%u\n", chunks, num_node_memblks);
        /* if it is set, fill out memory chunk array */
        if (chunks != NULL) {
            if (num_node_memblks == 0) {
                /* Non-NUMA machine.  Put pseudo-values. */
                node_data_t data;
                data.node_start_pfn = 0;
                data.node_spanned_pages = total_pages;
                data.node_id = 0;
                /* copy memory chunk structs to guest */
                if (copy_to_guest_offset(pi->memory_chunks, 0, &data, 1)) {
                    ret = -EFAULT;
                    break;
                }
            } else {
                for (i = 0; i < num_node_memblks && i < PUBLIC_MAXCHUNKS; i++) {
                    node_data_t data;
                    data.node_start_pfn = node_memblk[i].start_paddr >>
                                          PAGE_SHIFT;
                    data.node_spanned_pages = node_memblk[i].size >> PAGE_SHIFT;
                    data.node_id = node_memblk[i].nid;
                    /* copy memory chunk structs to guest */
                    if (copy_to_guest_offset(pi->memory_chunks, i, &data, 1)) {
                        ret = -EFAULT;
                        break;
                    }
                }
            }
        }
        /* set number of nodes */
        pi->nr_nodes = num_online_nodes();

        /* fetch cpu_to_node pointer from guest */
        get_xen_guest_handle(map, pi->cpu_to_node);

        /* if set, fill out cpu_to_node array */
        if (map != NULL) {
            /* copy cpu-to-node mapping to domU */
            memset(cpu_to_node_map, 0, sizeof(cpu_to_node_map));
            for (i = 0; i < num_online_cpus(); i++) {
                cpu_to_node_map[i] = cpu_to_node(i);
                if (copy_to_guest_offset(pi->cpu_to_node, i,
                                         &(cpu_to_node_map[i]), 1)) {
                    ret = -EFAULT;
                    break;
                }
            }
        }
#endif

        if ( copy_to_guest(u_sysctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;

    }

    return ret;
}
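
/*
 * Worked example of the topology arithmetic in XEN_SYSCTL_physinfo above
 * (figures assumed for illustration): on a box with 8 online CPUs, 2
 * hyperthreads per core and 2 cores per socket, cpu_sibling_map[0] has
 * weight 2 and cpu_core_map[0] has weight 4, so threads_per_core = 2,
 * cores_per_socket = 4 / 2 = 2 and sockets_per_node = 8 / 4 = 2.
 */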

static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size! */
    if (size == 0)
        size = PAGE_SIZE;

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, size);
}
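
/*
 * Example of the range check above, assuming the 16KB pages (PAGE_SHIFT
 * == 14) that Xen/ia64 is normally built with: a request for mpaddr
 * 0x3004000 with size 0 is first widened to one page, giving end =
 * PAGE_ALIGN(0x3008000) = 0x3008000, so iomem_access_permitted() is asked
 * about the single frame 0x3004000 >> 14 == 0xc01 through
 * (0x3008000 >> 14) - 1 == 0xc01.
 */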

unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                    __func__, ret);
        } else {
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX pte_pfn() */
        }
        perfc_incrc(dom0vp_phystomach);
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        perfc_incrc(dom0vp_machtophys);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    case IA64_DOM0VP_add_physmap_with_gmfn:
        ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
                                           (domid_t)arg3);
        break;
    case IA64_DOM0VP_expose_p2m:
        ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
        break;
    case IA64_DOM0VP_perfmon: {
        XEN_GUEST_HANDLE(void) hnd;
        set_xen_guest_handle(hnd, (void *)arg1);
        ret = do_perfmon_op(arg0, hnd, arg2);
        break;
    }
    default:
        ret = -1;
        printk("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}
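
/*
 * Illustrative sketch only (hypothetical, not part of this file): how a
 * paravirtualized dom0 kernel might use IA64_DOM0VP_phystomach to turn one
 * of its pseudo-physical frame numbers into a machine frame number.
 * example_hypercall_dom0vp() stands in for the guest's hypercall stub.
 */
#if 0
static unsigned long example_gpfn_to_mfn(unsigned long gpfn)
{
    /* Returns INVALID_MFN when the gpfn has no backing machine page. */
    return example_hypercall_dom0vp(IA64_DOM0VP_phystomach, gpfn, 0, 0, 0);
}
#endif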

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */