direct-io.hg

view tools/libxc/xc_private.c @ 10173:954f4dea9da6

[PAE] Allow pgdirs above 4GB for paravirt guests.
**NOTE**: This obviates the need for lowmem_emergency_pool.
Unpriv guests no longer need to be able to allocate memory
below 4GB for PAE PDPTs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri May 26 17:22:30 2006 +0100 (2006-05-26)
parents 24dbb153ab39
children 8aca850f66ad
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include "xc_private.h"
9 /* NB: arr must be mlock'ed */
10 int xc_get_pfn_type_batch(int xc_handle,
11 uint32_t dom, int num, unsigned long *arr)
12 {
13 DECLARE_DOM0_OP;
14 op.cmd = DOM0_GETPAGEFRAMEINFO2;
15 op.u.getpageframeinfo2.domain = (domid_t)dom;
16 op.u.getpageframeinfo2.num = num;
17 set_xen_guest_handle(op.u.getpageframeinfo2.array, arr);
18 return do_dom0_op(xc_handle, &op);
19 }
21 #define GETPFN_ERR (~0U)
22 unsigned int get_pfn_type(int xc_handle,
23 unsigned long mfn,
24 uint32_t dom)
25 {
26 DECLARE_DOM0_OP;
27 op.cmd = DOM0_GETPAGEFRAMEINFO;
28 op.u.getpageframeinfo.mfn = mfn;
29 op.u.getpageframeinfo.domain = (domid_t)dom;
30 if ( do_dom0_op(xc_handle, &op) < 0 )
31 {
32 PERROR("Unexpected failure when getting page frame info!");
33 return GETPFN_ERR;
34 }
35 return op.u.getpageframeinfo.type;
36 }
38 int xc_mmuext_op(
39 int xc_handle,
40 struct mmuext_op *op,
41 unsigned int nr_ops,
42 domid_t dom)
43 {
44 DECLARE_HYPERCALL;
45 long ret = -EINVAL;
47 hypercall.op = __HYPERVISOR_mmuext_op;
48 hypercall.arg[0] = (unsigned long)op;
49 hypercall.arg[1] = (unsigned long)nr_ops;
50 hypercall.arg[2] = (unsigned long)0;
51 hypercall.arg[3] = (unsigned long)dom;
53 if ( mlock(op, nr_ops*sizeof(*op)) != 0 )
54 {
55 PERROR("Could not lock memory for Xen hypercall");
56 goto out1;
57 }
59 ret = do_xen_hypercall(xc_handle, &hypercall);
61 safe_munlock(op, nr_ops*sizeof(*op));
63 out1:
64 return ret;
65 }
67 static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu)
68 {
69 int err = 0;
70 DECLARE_HYPERCALL;
72 if ( mmu->idx == 0 )
73 return 0;
75 hypercall.op = __HYPERVISOR_mmu_update;
76 hypercall.arg[0] = (unsigned long)mmu->updates;
77 hypercall.arg[1] = (unsigned long)mmu->idx;
78 hypercall.arg[2] = 0;
79 hypercall.arg[3] = mmu->subject;
81 if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
82 {
83 PERROR("flush_mmu_updates: mmu updates mlock failed");
84 err = 1;
85 goto out;
86 }
88 if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
89 {
90 ERROR("Failure when submitting mmu updates");
91 err = 1;
92 }
94 mmu->idx = 0;
96 safe_munlock(mmu->updates, sizeof(mmu->updates));
98 out:
99 return err;
100 }
102 xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom)
103 {
104 xc_mmu_t *mmu = malloc(sizeof(xc_mmu_t));
105 if ( mmu == NULL )
106 return mmu;
107 mmu->idx = 0;
108 mmu->subject = dom;
109 return mmu;
110 }
112 int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
113 unsigned long long ptr, unsigned long long val)
114 {
115 mmu->updates[mmu->idx].ptr = ptr;
116 mmu->updates[mmu->idx].val = val;
118 if ( ++mmu->idx == MAX_MMU_UPDATES )
119 return flush_mmu_updates(xc_handle, mmu);
121 return 0;
122 }
124 int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu)
125 {
126 return flush_mmu_updates(xc_handle, mmu);
127 }
/*
 * Issue a __HYPERVISOR_memory_op hypercall.
 *
 * @cmd selects the sub-operation and @arg points at the matching
 * argument structure (the three typed aliases below view the same
 * buffer).  The hypervisor reads and writes these buffers directly,
 * so every buffer involved must be mlock'ed before the hypercall and
 * munlock'ed afterwards: the first switch pins, the second unpins.
 * On any mlock failure, buffers already pinned are released and
 * -EINVAL is returned.  Pin/unpin pairs must stay exactly matched.
 */
129 int xc_memory_op(int xc_handle,
130 int cmd,
131 void *arg)
132 {
133 DECLARE_HYPERCALL;
/* Typed views of @arg; only the one matching @cmd is meaningful. */
134 struct xen_memory_reservation *reservation = arg;
135 struct xen_machphys_mfn_list *xmml = arg;
136 struct xen_translate_gpfn_list *trans = arg;
137 unsigned long *extent_start;
138 unsigned long *gpfn_list;
139 unsigned long *mfn_list;
140 long ret = -EINVAL;
142 hypercall.op = __HYPERVISOR_memory_op;
143 hypercall.arg[0] = (unsigned long)cmd;
144 hypercall.arg[1] = (unsigned long)arg;
/* Pin all user buffers the hypervisor will touch for this command. */
146 switch ( cmd )
147 {
148 case XENMEM_increase_reservation:
149 case XENMEM_decrease_reservation:
150 case XENMEM_populate_physmap:
151 if ( mlock(reservation, sizeof(*reservation)) != 0 )
152 {
153 PERROR("Could not mlock");
154 goto out1;
155 }
/* extent_start may legitimately be NULL (e.g. plain increase). */
156 get_xen_guest_handle(extent_start, reservation->extent_start);
157 if ( (extent_start != NULL) &&
158 (mlock(extent_start,
159 reservation->nr_extents * sizeof(unsigned long)) != 0) )
160 {
161 PERROR("Could not mlock");
162 safe_munlock(reservation, sizeof(*reservation));
163 goto out1;
164 }
165 break;
166 case XENMEM_machphys_mfn_list:
167 if ( mlock(xmml, sizeof(*xmml)) != 0 )
168 {
169 PERROR("Could not mlock");
170 goto out1;
171 }
172 get_xen_guest_handle(extent_start, xmml->extent_start);
173 if ( mlock(extent_start,
174 xmml->max_extents * sizeof(unsigned long)) != 0 )
175 {
176 PERROR("Could not mlock");
177 safe_munlock(xmml, sizeof(*xmml));
178 goto out1;
179 }
180 break;
181 case XENMEM_add_to_physmap:
182 if ( mlock(arg, sizeof(struct xen_add_to_physmap)) )
183 {
184 PERROR("Could not mlock");
185 goto out1;
186 }
187 break;
188 case XENMEM_translate_gpfn_list:
189 if ( mlock(trans, sizeof(*trans)) != 0 )
190 {
191 PERROR("Could not mlock");
192 goto out1;
193 }
194 get_xen_guest_handle(gpfn_list, trans->gpfn_list);
195 if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
196 {
197 PERROR("Could not mlock");
198 safe_munlock(trans, sizeof(*trans));
199 goto out1;
200 }
201 get_xen_guest_handle(mfn_list, trans->mfn_list);
202 if ( mlock(mfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
203 {
204 PERROR("Could not mlock");
/* Unwind both earlier pins before bailing out. */
205 safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
206 safe_munlock(trans, sizeof(*trans));
207 goto out1;
208 }
209 break;
210 }
212 ret = do_xen_hypercall(xc_handle, &hypercall);
/* Unpin everything pinned above, mirroring the first switch. */
214 switch ( cmd )
215 {
216 case XENMEM_increase_reservation:
217 case XENMEM_decrease_reservation:
218 case XENMEM_populate_physmap:
219 safe_munlock(reservation, sizeof(*reservation));
220 get_xen_guest_handle(extent_start, reservation->extent_start);
221 if ( extent_start != NULL )
222 safe_munlock(extent_start,
223 reservation->nr_extents * sizeof(unsigned long));
224 break;
225 case XENMEM_machphys_mfn_list:
226 safe_munlock(xmml, sizeof(*xmml));
227 get_xen_guest_handle(extent_start, xmml->extent_start);
228 safe_munlock(extent_start,
229 xmml->max_extents * sizeof(unsigned long));
230 break;
231 case XENMEM_add_to_physmap:
232 safe_munlock(arg, sizeof(struct xen_add_to_physmap));
233 break;
234 case XENMEM_translate_gpfn_list:
235 get_xen_guest_handle(mfn_list, trans->mfn_list);
236 safe_munlock(mfn_list, trans->nr_gpfns * sizeof(long));
237 get_xen_guest_handle(gpfn_list, trans->gpfn_list);
238 safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
239 safe_munlock(trans, sizeof(*trans));
240 break;
241 }
243 out1:
244 return ret;
245 }
248 long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
249 {
250 DECLARE_DOM0_OP;
252 op.cmd = DOM0_GETVCPUINFO;
253 op.u.getvcpuinfo.domain = (domid_t)domid;
254 op.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
255 if ( (do_dom0_op(xc_handle, &op) < 0) )
256 {
257 PERROR("Could not get info on domain");
258 return -1;
259 }
260 return op.u.getvcpuinfo.cpu_time;
261 }
264 int xc_get_pfn_list(int xc_handle,
265 uint32_t domid,
266 unsigned long *pfn_buf,
267 unsigned long max_pfns)
268 {
269 DECLARE_DOM0_OP;
270 int ret;
271 op.cmd = DOM0_GETMEMLIST;
272 op.u.getmemlist.domain = (domid_t)domid;
273 op.u.getmemlist.max_pfns = max_pfns;
274 set_xen_guest_handle(op.u.getmemlist.buffer, pfn_buf);
276 #ifdef VALGRIND
277 memset(pfn_buf, 0, max_pfns * sizeof(unsigned long));
278 #endif
280 if ( mlock(pfn_buf, max_pfns * sizeof(unsigned long)) != 0 )
281 {
282 PERROR("xc_get_pfn_list: pfn_buf mlock failed");
283 return -1;
284 }
286 ret = do_dom0_op(xc_handle, &op);
288 safe_munlock(pfn_buf, max_pfns * sizeof(unsigned long));
290 #if 0
291 #ifdef DEBUG
292 DPRINTF(("Ret for xc_get_pfn_list is %d\n", ret));
293 if (ret >= 0) {
294 int i, j;
295 for (i = 0; i < op.u.getmemlist.num_pfns; i += 16) {
296 fprintf(stderr, "0x%x: ", i);
297 for (j = 0; j < 16; j++)
298 fprintf(stderr, "0x%lx ", pfn_buf[i + j]);
299 fprintf(stderr, "\n");
300 }
301 }
302 #endif
303 #endif
305 return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
306 }
308 long xc_get_tot_pages(int xc_handle, uint32_t domid)
309 {
310 DECLARE_DOM0_OP;
311 op.cmd = DOM0_GETDOMAININFO;
312 op.u.getdomaininfo.domain = (domid_t)domid;
313 return (do_dom0_op(xc_handle, &op) < 0) ?
314 -1 : op.u.getdomaininfo.tot_pages;
315 }
317 int xc_copy_to_domain_page(int xc_handle,
318 uint32_t domid,
319 unsigned long dst_pfn,
320 const char *src_page)
321 {
322 void *vaddr = xc_map_foreign_range(
323 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
324 if ( vaddr == NULL )
325 return -1;
326 memcpy(vaddr, src_page, PAGE_SIZE);
327 munmap(vaddr, PAGE_SIZE);
328 return 0;
329 }
331 int xc_clear_domain_page(int xc_handle,
332 uint32_t domid,
333 unsigned long dst_pfn)
334 {
335 void *vaddr = xc_map_foreign_range(
336 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
337 if ( vaddr == NULL )
338 return -1;
339 memset(vaddr, 0, PAGE_SIZE);
340 munmap(vaddr, PAGE_SIZE);
341 return 0;
342 }
/*
 * Return the apparent payload size of the file behind @fd.
 *
 * If the file starts with the gzip magic bytes (1f 8b) the
 * uncompressed length is taken from the ISIZE field stored
 * little-endian in the final four bytes of the file (RFC 1952);
 * otherwise the raw file size is returned.  The file offset is
 * rewound to the start before a successful return.
 *
 * Returns 0 on any seek/read failure (or for files < 2 bytes).
 *
 * Fixes over the previous revision: lseek failures are now checked,
 * and the gzip ISIZE field is decoded byte-by-byte so the result no
 * longer depends on host endianness.
 */
unsigned long xc_get_filesz(int fd)
{
    unsigned char sig[2], szbuf[4];
    off_t end;
    unsigned long sz;

    if ( lseek(fd, 0, SEEK_SET) == (off_t)-1 )
        return 0;
    if ( read(fd, sig, sizeof(sig)) != sizeof(sig) )
        return 0;

    end = lseek(fd, 0, SEEK_END);
    if ( end == (off_t)-1 )
        return 0;
    sz = (unsigned long)end;

    if ( (sig[0] == 0x1f) && (sig[1] == 0x8b) ) /* GZIP signature? */
    {
        /* ISIZE: uncompressed length mod 2^32, little-endian,
         * in the last 4 bytes of the member. */
        if ( lseek(fd, -4, SEEK_END) == (off_t)-1 )
            return 0;
        if ( read(fd, szbuf, 4) != 4 )
            return 0;
        sz = (unsigned long)szbuf[0]
           | ((unsigned long)szbuf[1] << 8)
           | ((unsigned long)szbuf[2] << 16)
           | ((unsigned long)szbuf[3] << 24);
    }

    lseek(fd, 0, SEEK_SET);

    return sz;
}
366 void xc_map_memcpy(unsigned long dst, const char *src, unsigned long size,
367 int xch, uint32_t dom, unsigned long *parray,
368 unsigned long vstart)
369 {
370 char *va;
371 unsigned long chunksz, done, pa;
373 for ( done = 0; done < size; done += chunksz )
374 {
375 pa = dst + done - vstart;
376 va = xc_map_foreign_range(
377 xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
378 chunksz = size - done;
379 if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
380 chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
381 memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
382 munmap(va, PAGE_SIZE);
383 }
384 }
386 int xc_dom0_op(int xc_handle, dom0_op_t *op)
387 {
388 return do_dom0_op(xc_handle, op);
389 }
391 int xc_version(int xc_handle, int cmd, void *arg)
392 {
393 int rc, argsize = 0;
395 switch ( cmd )
396 {
397 case XENVER_extraversion:
398 argsize = sizeof(xen_extraversion_t);
399 break;
400 case XENVER_compile_info:
401 argsize = sizeof(xen_compile_info_t);
402 break;
403 case XENVER_capabilities:
404 argsize = sizeof(xen_capabilities_info_t);
405 break;
406 case XENVER_changeset:
407 argsize = sizeof(xen_changeset_info_t);
408 break;
409 case XENVER_platform_parameters:
410 argsize = sizeof(xen_platform_parameters_t);
411 break;
412 }
414 if ( (argsize != 0) && (mlock(arg, argsize) != 0) )
415 {
416 PERROR("Could not lock memory for version hypercall");
417 return -ENOMEM;
418 }
420 #ifdef VALGRIND
421 if (argsize != 0)
422 memset(arg, 0, argsize);
423 #endif
425 rc = do_xen_version(xc_handle, cmd, arg);
427 if ( argsize != 0 )
428 safe_munlock(arg, argsize);
430 return rc;
431 }
433 /*
434 * Local variables:
435 * mode: C
436 * c-set-style: "BSD"
437 * c-basic-offset: 4
438 * tab-width: 4
439 * indent-tabs-mode: nil
440 * End:
441 */