ia64/xen-unstable

view xen/arch/ia64/xen/dom0_ops.c @ 10816:7be1cfe8345b

[IA64] iomem support for driver domains.

First steps in the hypervisor to support driver domains.

IO port capabilities added (not yet used).
IO mem capabilities checked.
ASSIGN_nocache flag added.
Memory attributes checked.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Thu Jul 27 09:47:10 2006 -0600 (2006-07-27)
parents 86e5d8458c08
children 199d53efd029
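
The iomem check in dom0vp_ioremap() below only succeeds once the privileged domain has granted the MMIO range to the driver domain through the iocap interfaces pulled in via xen/iocap.h. A minimal sketch of that grant-then-map pairing, assuming a hypothetical helper and placeholder names (grant_and_map_mmio, dd, mmio_base, mmio_size are illustrative, not part of this changeset):

/* Sketch only: shows how the iocap calls fit together for a driver
 * domain `dd'.  The helper and its names are hypothetical. */
static long grant_and_map_mmio(struct domain *dd, unsigned long mmio_base,
                               unsigned long mmio_size)
{
    unsigned long first = mmio_base >> PAGE_SHIFT;
    unsigned long last = (PAGE_ALIGN(mmio_base + mmio_size) >> PAGE_SHIFT) - 1;
    long rc;

    /* Step 1: the privileged domain grants the MMIO range. */
    rc = iomem_permit_access(dd, first, last);
    if (rc)
        return rc;

    /* Step 2: a later IA64_DOM0VP_ioremap from the driver domain now
     * passes the iomem_access_permitted() check in dom0vp_ioremap()
     * and the range is actually assigned. */
    return assign_domain_mmio_page(dd, mmio_base, mmio_size);
}

The DOM0_IOPORT_PERMISSION handler added below plays the same role for I/O port ranges via ioports_permit_access()/ioports_deny_access().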
line source
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <public/sched_ctl.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <xen/iocap.h>

void build_physmap_table(struct domain *d);

extern unsigned long total_pages;
long arch_do_dom0_op(dom0_op_t *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {
    case DOM0_GETPAGEFRAMEINFO:
    {
        struct page_info *page;
        unsigned long mfn = op->u.getpageframeinfo.gmfn;
        domid_t dom = op->u.getpageframeinfo.domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            op->u.getpageframeinfo.type = NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                default:
                    panic("No such page type\n");
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_guest(u_dom0_op, op, 1);
    }
    break;

    case DOM0_GETPAGEFRAMEINFO2:
    {
#define GPF2_BATCH 128
        int n, j;
        int num = op->u.getpageframeinfo2.num;
        domid_t dom = op->u.getpageframeinfo2.domain;
        struct domain *d;
        unsigned long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d);
            break;
        }

        l_arr = (unsigned long *)alloc_xenheap_page();
        if ( unlikely(l_arr == NULL) )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = ((num - n) > GPF2_BATCH) ? GPF2_BATCH : (num - n);

            if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EINVAL;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = l_arr[j];

                if ( unlikely(mfn >= max_page) )
                    goto e2_err;

                page = mfn_to_page(mfn);

                if ( likely(get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    default:
                        panic("No such page type\n");
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                {
                e2_err:
                    l_arr[j] |= XTAB;
                }
            }

            if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
                                      n, l_arr, k) )
            {
                ret = -EINVAL;
                break;
            }

            n += j;
        }

        free_xenheap_page((void *) l_arr);

        put_domain(d);
    }
    break;

    case DOM0_GETMEMLIST:
    {
        unsigned long i;
        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
        unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
        unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
        unsigned long mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }
        for (i = 0 ; i < nr_pages ; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                     (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = pte_pfn(*pte);
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_dom0_op, op, 1))
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case DOM0_PHYSINFO:
    {
        dom0_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core =
            cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->sockets_per_node =
            num_online_cpus() / cpus_weight(cpu_core_map[0]);
        pi->nr_nodes = 1;
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
        ret = 0;
        if ( copy_to_guest(u_dom0_op, op, 1) )
            ret = -EFAULT;
    }
    break;

    case DOM0_DOMAIN_SETUP:
    {
        dom0_domain_setup_t *ds = &op->u.domain_setup;
        struct domain *d = find_domain_by_id(ds->domain);

        if ( d == NULL) {
            ret = -EINVAL;
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags.  */
            if (d->arch.is_vti)
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params.  */
            ds->bp = 0;     /* unknown.  */
            ds->maxmem = 0; /* unknown.  */
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
            /* Copy back.  */
            if ( copy_to_guest(u_dom0_op, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
                if (!vmx_enabled) {
                    printk("No VMX hardware feature for vmx domain.\n");
                    ret = -EINVAL;
                    break;
                }
                d->arch.is_vti = 1;
                vmx_setup_platform(d);
            }
            else {
                build_physmap_table(d);
                dom_fw_setup(d, ds->bp, ds->maxmem);
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                if (ds->hypercall_imm) {
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
            }
        }

        put_domain(d);
    }
    break;

    case DOM0_SHADOW_CONTROL:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.shadow_control.domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_control);
            put_domain(d);
            copy_to_guest(u_dom0_op, op, 1);
        }
    }
    break;

    case DOM0_IOPORT_PERMISSION:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;

        ret = -ESRCH;
        d = find_domain_by_id(op->u.ioport_permission.domain);
        if (unlikely(d == NULL))
            break;

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, fp + np - 1);
            else
                ret = ioports_deny_access(d, fp, fp + np - 1);
        }

        put_domain(d);
    }
    break;
    default:
        printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n", op->cmd);
        ret = -ENOSYS;

    }

    return ret;
}

#ifdef CONFIG_XEN_IA64_DOM0_VP
static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size!  */
    if (size == 0)
        size = PAGE_SIZE;

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, size);
}

unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            DPRINTK("%s:%d INVALID_MFN ret: 0x%lx\n", __func__, __LINE__, ret);
        } else {
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX pte_pfn() */
        }
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    default:
        ret = -1;
        printf("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */