direct-io.hg — tools/libxc/xc_private.c @ 12988:e080700efa56

[TOOLS] Fix the build. Clearly demarcate PPC-specific stuff.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Wed Dec 13 10:23:53 2006 +0000
parents   c519ab0f70f3
children  cd532c9351fc
/******************************************************************************
 * xc_private.c
 *
 * Helper functions for the rest of the library.
 */

#include <inttypes.h>
#include "xc_private.h"
#include "xg_private.h"
#include <stdarg.h>
#include <pthread.h>

static __thread xc_error last_error = { XC_ERROR_NONE, "" };

#if DEBUG
static xc_error_handler error_handler = xc_default_error_handler;
#else
static xc_error_handler error_handler = NULL;
#endif
void xc_default_error_handler(const xc_error *err)
{
    const char *desc = xc_error_code_to_desc(err->code);
    fprintf(stderr, "ERROR %s: %s\n", desc, err->message);
}

const xc_error *xc_get_last_error(void)
{
    return &last_error;
}

void xc_clear_last_error(void)
{
    last_error.code = XC_ERROR_NONE;
    last_error.message[0] = '\0';
}
const char *xc_error_code_to_desc(int code)
{
    /* Sync to members of xc_error_code enumeration in xenctrl.h */
    switch ( code )
    {
    case XC_ERROR_NONE:
        return "No error details";
    case XC_INTERNAL_ERROR:
        return "Internal error";
    case XC_INVALID_KERNEL:
        return "Invalid kernel";
    }

    return "Unknown error code";
}
xc_error_handler xc_set_error_handler(xc_error_handler handler)
{
    xc_error_handler old = error_handler;
    error_handler = handler;
    return old;
}
static void _xc_set_error(int code, const char *msg)
{
    last_error.code = code;
    strncpy(last_error.message, msg, XC_MAX_ERROR_MSG_LEN - 1);
    last_error.message[XC_MAX_ERROR_MSG_LEN-1] = '\0';
}

void xc_set_error(int code, const char *fmt, ...)
{
    int saved_errno = errno;
    char msg[XC_MAX_ERROR_MSG_LEN];
    va_list args;

    va_start(args, fmt);
    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
    va_end(args);

    _xc_set_error(code, msg);

    errno = saved_errno;

    if ( error_handler != NULL )
        error_handler(&last_error);
}
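
/*
 * Illustrative usage (sketch only, not part of this file): a caller can
 * install its own handler instead of the default stderr one, or inspect
 * xc_get_last_error() after a failed call. The handler name below is
 * hypothetical.
 *
 *   static void my_log_handler(const xc_error *err)
 *   {
 *       fprintf(stderr, "libxc: %s: %s\n",
 *               xc_error_code_to_desc(err->code), err->message);
 *   }
 *   ...
 *   xc_error_handler old = xc_set_error_handler(my_log_handler);
 */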
/*
 * Buffers handed to hypercalls must stay resident while Xen accesses them,
 * so we mlock()/munlock() them around the call. Solaris builds skip this
 * (hence the __sun__ guards).
 */
int lock_pages(void *addr, size_t len)
{
    int e = 0;
#ifndef __sun__
    e = mlock(addr, len);
#endif
    return (e);
}

void unlock_pages(void *addr, size_t len)
{
#ifndef __sun__
    safe_munlock(addr, len);
#endif
}
/* NB: arr must be locked */
int xc_get_pfn_type_batch(int xc_handle,
                          uint32_t dom, int num, unsigned long *arr)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo2;
    domctl.domain = (domid_t)dom;
    domctl.u.getpageframeinfo2.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo2.array, arr);
    return do_domctl(xc_handle, &domctl);
}
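
/*
 * Illustrative usage (sketch only, not part of this file): the caller is
 * responsible for locking 'arr' around the call, e.g.:
 *
 *   if ( lock_pages(arr, num * sizeof(unsigned long)) != 0 )
 *       return -1;
 *   rc = xc_get_pfn_type_batch(xc_handle, dom, num, arr);
 *   unlock_pages(arr, num * sizeof(unsigned long));
 */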
int xc_mmuext_op(
    int xc_handle,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = (unsigned long)op;
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0;
    hypercall.arg[3] = (unsigned long)dom;

    if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(op, nr_ops*sizeof(*op));

 out1:
    return ret;
}
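
/*
 * Illustrative usage (sketch only, not part of this file): pinning a page
 * as an L2 page table; 'mfn' is a hypothetical caller value.
 *
 *   struct mmuext_op op;
 *   op.cmd = MMUEXT_PIN_L2_TABLE;
 *   op.arg1.mfn = mfn;
 *   if ( xc_mmuext_op(xc_handle, &op, 1, dom) < 0 )
 *       return -1;
 */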
static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;

    if ( mmu->idx == 0 )
        return 0;

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    unlock_pages(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom)
{
    xc_mmu_t *mmu = malloc(sizeof(xc_mmu_t));
    if ( mmu == NULL )
        return mmu;
    mmu->idx     = 0;
    mmu->subject = dom;
    return mmu;
}

int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                      unsigned long long ptr, unsigned long long val)
{
    mmu->updates[mmu->idx].ptr = ptr;
    mmu->updates[mmu->idx].val = val;

    if ( ++mmu->idx == MAX_MMU_UPDATES )
        return flush_mmu_updates(xc_handle, mmu);

    return 0;
}

int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu)
{
    return flush_mmu_updates(xc_handle, mmu);
}
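
/*
 * Illustrative usage (sketch only, not part of this file): updates are
 * queued with xc_add_mmu_update() and flushed automatically once
 * MAX_MMU_UPDATES entries accumulate; xc_finish_mmu_updates() flushes any
 * remainder. 'maddr' and 'pte' are hypothetical caller values; the low
 * bits of 'ptr' carry the update type (e.g. MMU_NORMAL_PT_UPDATE).
 *
 *   xc_mmu_t *mmu = xc_init_mmu_updates(xc_handle, dom);
 *   if ( mmu == NULL )
 *       return -1;
 *   if ( xc_add_mmu_update(xc_handle, mmu, maddr | MMU_NORMAL_PT_UPDATE, pte) ||
 *        xc_finish_mmu_updates(xc_handle, mmu) )
 *   {
 *       free(mmu);
 *       return -1;
 *   }
 *   free(mmu);
 */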
int xc_memory_op(int xc_handle,
                 int cmd,
                 void *arg)
{
    DECLARE_HYPERCALL;
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                        xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start,
                     xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    }

 out1:
    return ret;
}
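
/*
 * Illustrative usage (sketch only, not part of this file): fetching the
 * machine-to-phys MFN list. 'mfns' and N are hypothetical caller values;
 * on success the hypercall returns 0 and fills in nr_extents.
 *
 *   struct xen_machphys_mfn_list xmml;
 *   xen_pfn_t mfns[N];
 *
 *   xmml.max_extents = N;
 *   set_xen_guest_handle(xmml.extent_start, mfns);
 *   if ( xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
 *        (xmml.nr_extents != N) )
 *       return -1;
 */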
long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = (domid_t)domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
    if ( (do_domctl(xc_handle, &domctl) < 0) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return domctl.u.getvcpuinfo.cpu_time;
}
#ifndef __ia64__
int xc_get_pfn_list(int xc_handle,
                    uint32_t domid,
                    xen_pfn_t *pfn_buf,
                    unsigned long max_pfns)
{
    DECLARE_DOMCTL;
    int ret;
    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

#ifdef VALGRIND
    memset(pfn_buf, 0, max_pfns * sizeof(xen_pfn_t));
#endif

    if ( lock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 )
    {
        PERROR("xc_get_pfn_list: pfn_buf lock failed");
        return -1;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t));

#if 0
#ifdef DEBUG
    DPRINTF("Ret for xc_get_pfn_list is %d\n", ret);
    if (ret >= 0) {
        int i, j;
        for (i = 0; i < domctl.u.getmemlist.num_pfns; i += 16) {
            DPRINTF("0x%x: ", i);
            for (j = 0; j < 16; j++)
                DPRINTF("0x%lx ", pfn_buf[i + j]);
            DPRINTF("\n");
        }
    }
#endif
#endif

    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
#endif
long xc_get_tot_pages(int xc_handle, uint32_t domid)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)domid;
    return (do_domctl(xc_handle, &domctl) < 0) ?
        -1 : domctl.u.getdomaininfo.tot_pages;
}
int xc_copy_to_domain_page(int xc_handle,
                           uint32_t domid,
                           unsigned long dst_pfn,
                           const char *src_page)
{
    void *vaddr = xc_map_foreign_range(
        xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memcpy(vaddr, src_page, PAGE_SIZE);
    munmap(vaddr, PAGE_SIZE);
    return 0;
}

int xc_clear_domain_page(int xc_handle,
                         uint32_t domid,
                         unsigned long dst_pfn)
{
    void *vaddr = xc_map_foreign_range(
        xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memset(vaddr, 0, PAGE_SIZE);
    munmap(vaddr, PAGE_SIZE);
    return 0;
}
void xc_map_memcpy(unsigned long dst, const char *src, unsigned long size,
                   int xch, uint32_t dom, xen_pfn_t *parray,
                   unsigned long vstart)
{
    char *va;
    unsigned long chunksz, done, pa;

    for ( done = 0; done < size; done += chunksz )
    {
        pa = dst + done - vstart;
        va = xc_map_foreign_range(
            xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
        chunksz = size - done;
        if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
            chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
        memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
        munmap(va, PAGE_SIZE);
    }
}
int xc_domctl(int xc_handle, struct xen_domctl *domctl)
{
    return do_domctl(xc_handle, domctl);
}

int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
    return do_sysctl(xc_handle, sysctl);
}
int xc_version(int xc_handle, int cmd, void *arg)
{
    int rc, argsize = 0;

    switch ( cmd )
    {
    case XENVER_extraversion:
        argsize = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        argsize = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        argsize = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        argsize = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        argsize = sizeof(xen_platform_parameters_t);
        break;
    }

    if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
    {
        PERROR("Could not lock memory for version hypercall");
        return -ENOMEM;
    }

#ifdef VALGRIND
    if (argsize != 0)
        memset(arg, 0, argsize);
#endif

    rc = do_xen_version(xc_handle, cmd, arg);

    if ( argsize != 0 )
        unlock_pages(arg, argsize);

    return rc;
}
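
/*
 * Illustrative usage (sketch only, not part of this file): XENVER_version
 * takes no argument buffer and returns (major << 16) | minor, while e.g.
 * XENVER_extraversion fills in a caller-supplied buffer.
 *
 *   xen_extraversion_t extra;
 *   int ver = xc_version(xc_handle, XENVER_version, NULL);
 *   if ( xc_version(xc_handle, XENVER_extraversion, &extra) == 0 )
 *       printf("Xen %d.%d%s\n", ver >> 16, ver & 0xffff, extra);
 */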
unsigned long xc_make_page_below_4G(
    int xc_handle, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_memory_decrease_reservation(
        xc_handle, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_memory_increase_reservation(
        xc_handle, domid, 1, 0, 32, &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
char *safe_strerror(int errcode)
{
    static __thread char errbuf[32];
    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    char *strerror_str;

    /*
     * Thread-unsafe strerror() is protected by a local mutex. We copy
     * the string to a thread-private buffer before releasing the mutex.
     */
    pthread_mutex_lock(&mutex);
    strerror_str = strerror(errcode);
    strncpy(errbuf, strerror_str, sizeof(errbuf));
    errbuf[sizeof(errbuf)-1] = '\0';
    pthread_mutex_unlock(&mutex);

    return errbuf;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */