direct-io.hg

view tools/libxc/xc_domain.c @ 12765:2dd4569e0640

[LIBXC] Add an error reporting API to the libxc library.

- An 'xc_error' struct is used to pass around error
details. Currently contains two members 'code' an enumeration of
error types, and 'message' a free text description of the specific
problem.

- The xc_get_last_error() method returns a const pointer to the
internal instance of this struct managed by libxc. By returning a
const pointer we can add extra members to the end of the struct at
any time without worrying about ABI of callers. This will let us
provide more fine-grained info if needed in the future.

- The xc_error instance is statically defined inside libxc and marked
__thread. This ensures that errors are recorded per-thread, and
that when dealing with errors we never need to call malloc - all
storage needed is statically allocated.

- The xc_clear_last_error() method resets any currently recorded
error details

- The xc_error_code_to_desc() method converts the integer error code
into a generic user facing message. eg "Invalid kernel". Together
with the 'message' field from xc_error, this provides the user
visible feedback. eg "Invalid kernel: Non PAE-kernel on PAE host."

- A callback can be registered with xc_set_error_handler to receive
notification whenever an error is recorded, rather than querying
for error details after the fact with xc_get_last_error

- If built with -DDEBUG set, a default error handler will be
registered which calls fprintf(stderr), thus maintaining current
behaviour of logging errors to stderr during developer builds.

- The python binding for libxc is updated to use xc_get_last_error
to pull out error details whenever appropriate, instead of
returning info based on 'errno'

- The xc_set_error method is private to libxc internals, and is used
for setting error details

- The ERROR and PERROR macros have been updated to call xc_set_error
automatically specifying XC_INTERNAL_ERROR as the error code. This
gives a generic error report for all current failure points

- Some uses of the ERROR macro have been replaced with explicit
calls to xc_set_error to enable finer grained error reporting. In
particular the code dealing with invalid kernel types uses this
to report about PAE/architecture/wordsize mismatches

The patch has been tested by calling xm create against a variety of
config files defining invalid kernels of various kinds. It has also
been tested with libvirt talking to xend. In both cases the error
messages were propagated all the way back up the stack.

There is only one place where I need to do further work. The suspend
& restore APIs in Xend invoke external helper programs rather than
calling libxc directly. This means that error details are essentially
lost. Since there is already code in XenD which scans STDERR from
these programs I will investigate adapting this to extract actual
error messages from these helpers.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
author kfraser@localhost.localdomain
date Thu Dec 07 11:36:26 2006 +0000 (2006-12-07)
parents 2ae4e4e89d6d
children df5fa63490f4
line source
1 /******************************************************************************
2 * xc_domain.c
3 *
4 * API for manipulating and obtaining information on domains.
5 *
6 * Copyright (c) 2003, K A Fraser.
7 */
9 #include "xc_private.h"
10 #include <xen/memory.h>
12 int xc_domain_create(int xc_handle,
13 uint32_t ssidref,
14 xen_domain_handle_t handle,
15 uint32_t flags,
16 uint32_t *pdomid)
17 {
18 int err;
19 DECLARE_DOMCTL;
21 domctl.cmd = XEN_DOMCTL_createdomain;
22 domctl.domain = (domid_t)*pdomid;
23 domctl.u.createdomain.ssidref = ssidref;
24 domctl.u.createdomain.flags = flags;
25 memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
26 if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
27 return err;
29 *pdomid = (uint16_t)domctl.domain;
30 return 0;
31 }
34 int xc_domain_pause(int xc_handle,
35 uint32_t domid)
36 {
37 DECLARE_DOMCTL;
38 domctl.cmd = XEN_DOMCTL_pausedomain;
39 domctl.domain = (domid_t)domid;
40 return do_domctl(xc_handle, &domctl);
41 }
44 int xc_domain_unpause(int xc_handle,
45 uint32_t domid)
46 {
47 DECLARE_DOMCTL;
48 domctl.cmd = XEN_DOMCTL_unpausedomain;
49 domctl.domain = (domid_t)domid;
50 return do_domctl(xc_handle, &domctl);
51 }
54 int xc_domain_destroy(int xc_handle,
55 uint32_t domid)
56 {
57 DECLARE_DOMCTL;
58 domctl.cmd = XEN_DOMCTL_destroydomain;
59 domctl.domain = (domid_t)domid;
60 return do_domctl(xc_handle, &domctl);
61 }
63 int xc_domain_shutdown(int xc_handle,
64 uint32_t domid,
65 int reason)
66 {
67 int ret = -1;
68 sched_remote_shutdown_t arg;
69 DECLARE_HYPERCALL;
71 hypercall.op = __HYPERVISOR_sched_op;
72 hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown;
73 hypercall.arg[1] = (unsigned long)&arg;
74 arg.domain_id = domid;
75 arg.reason = reason;
77 if ( lock_pages(&arg, sizeof(arg)) != 0 )
78 {
79 PERROR("Could not lock memory for Xen hypercall");
80 goto out1;
81 }
83 ret = do_xen_hypercall(xc_handle, &hypercall);
85 unlock_pages(&arg, sizeof(arg));
87 out1:
88 return ret;
89 }
92 int xc_vcpu_setaffinity(int xc_handle,
93 uint32_t domid,
94 int vcpu,
95 uint64_t cpumap)
96 {
97 DECLARE_DOMCTL;
98 int ret = -1;
100 domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
101 domctl.domain = (domid_t)domid;
102 domctl.u.vcpuaffinity.vcpu = vcpu;
104 set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
105 (uint8_t *)&cpumap);
106 domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
108 if ( lock_pages(&cpumap, sizeof(cpumap)) != 0 )
109 {
110 PERROR("Could not lock memory for Xen hypercall");
111 goto out;
112 }
114 ret = do_domctl(xc_handle, &domctl);
116 unlock_pages(&cpumap, sizeof(cpumap));
118 out:
119 return ret;
120 }
123 int xc_vcpu_getaffinity(int xc_handle,
124 uint32_t domid,
125 int vcpu,
126 uint64_t *cpumap)
127 {
128 DECLARE_DOMCTL;
129 int ret = -1;
131 domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
132 domctl.domain = (domid_t)domid;
133 domctl.u.vcpuaffinity.vcpu = vcpu;
135 set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
136 (uint8_t *)cpumap);
137 domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8;
139 if ( lock_pages(cpumap, sizeof(*cpumap)) != 0 )
140 {
141 PERROR("Could not lock memory for Xen hypercall");
142 goto out;
143 }
145 ret = do_domctl(xc_handle, &domctl);
147 unlock_pages(cpumap, sizeof(*cpumap));
149 out:
150 return ret;
151 }
154 int xc_domain_getinfo(int xc_handle,
155 uint32_t first_domid,
156 unsigned int max_doms,
157 xc_dominfo_t *info)
158 {
159 unsigned int nr_doms;
160 uint32_t next_domid = first_domid;
161 DECLARE_DOMCTL;
162 int rc = 0;
164 memset(info, 0, max_doms*sizeof(xc_dominfo_t));
166 for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
167 {
168 domctl.cmd = XEN_DOMCTL_getdomaininfo;
169 domctl.domain = (domid_t)next_domid;
170 if ( (rc = do_domctl(xc_handle, &domctl)) < 0 )
171 break;
172 info->domid = (uint16_t)domctl.domain;
174 info->dying = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying);
175 info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown);
176 info->paused = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_paused);
177 info->blocked = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_blocked);
178 info->running = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
179 info->hvm = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
181 info->shutdown_reason =
182 (domctl.u.getdomaininfo.flags>>XEN_DOMINF_shutdownshift) &
183 XEN_DOMINF_shutdownmask;
185 if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
186 {
187 info->shutdown = 0;
188 info->crashed = 1;
189 }
191 info->ssidref = domctl.u.getdomaininfo.ssidref;
192 info->nr_pages = domctl.u.getdomaininfo.tot_pages;
193 info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
194 info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
195 info->cpu_time = domctl.u.getdomaininfo.cpu_time;
196 info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
197 info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
199 memcpy(info->handle, domctl.u.getdomaininfo.handle,
200 sizeof(xen_domain_handle_t));
202 next_domid = (uint16_t)domctl.domain + 1;
203 info++;
204 }
206 if ( nr_doms == 0 )
207 return rc;
209 return nr_doms;
210 }
212 int xc_domain_getinfolist(int xc_handle,
213 uint32_t first_domain,
214 unsigned int max_domains,
215 xc_domaininfo_t *info)
216 {
217 int ret = 0;
218 DECLARE_SYSCTL;
220 if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
221 return -1;
223 sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
224 sysctl.u.getdomaininfolist.first_domain = first_domain;
225 sysctl.u.getdomaininfolist.max_domains = max_domains;
226 set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
228 if ( xc_sysctl(xc_handle, &sysctl) < 0 )
229 ret = -1;
230 else
231 ret = sysctl.u.getdomaininfolist.num_domains;
233 unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));
235 return ret;
236 }
238 int xc_vcpu_getcontext(int xc_handle,
239 uint32_t domid,
240 uint32_t vcpu,
241 vcpu_guest_context_t *ctxt)
242 {
243 int rc;
244 DECLARE_DOMCTL;
246 domctl.cmd = XEN_DOMCTL_getvcpucontext;
247 domctl.domain = (domid_t)domid;
248 domctl.u.vcpucontext.vcpu = (uint16_t)vcpu;
249 set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
251 if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 )
252 return rc;
254 rc = do_domctl(xc_handle, &domctl);
256 unlock_pages(ctxt, sizeof(*ctxt));
258 return rc;
259 }
262 int xc_shadow_control(int xc_handle,
263 uint32_t domid,
264 unsigned int sop,
265 unsigned long *dirty_bitmap,
266 unsigned long pages,
267 unsigned long *mb,
268 uint32_t mode,
269 xc_shadow_op_stats_t *stats)
270 {
271 int rc;
272 DECLARE_DOMCTL;
273 domctl.cmd = XEN_DOMCTL_shadow_op;
274 domctl.domain = (domid_t)domid;
275 domctl.u.shadow_op.op = sop;
276 domctl.u.shadow_op.pages = pages;
277 domctl.u.shadow_op.mb = mb ? *mb : 0;
278 domctl.u.shadow_op.mode = mode;
279 set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, dirty_bitmap);
281 rc = do_domctl(xc_handle, &domctl);
283 if ( stats )
284 memcpy(stats, &domctl.u.shadow_op.stats,
285 sizeof(xc_shadow_op_stats_t));
287 if ( mb )
288 *mb = domctl.u.shadow_op.mb;
290 return (rc == 0) ? domctl.u.shadow_op.pages : rc;
291 }
293 int xc_domain_setcpuweight(int xc_handle,
294 uint32_t domid,
295 float weight)
296 {
297 int sched_id;
298 int ret;
300 /* Figure out which scheduler is currently used: */
301 if ( (ret = xc_sched_id(xc_handle, &sched_id)) != 0 )
302 return ret;
304 /* No-op. */
305 return 0;
306 }
308 int xc_domain_setmaxmem(int xc_handle,
309 uint32_t domid,
310 unsigned int max_memkb)
311 {
312 DECLARE_DOMCTL;
313 domctl.cmd = XEN_DOMCTL_max_mem;
314 domctl.domain = (domid_t)domid;
315 domctl.u.max_mem.max_memkb = max_memkb;
316 return do_domctl(xc_handle, &domctl);
317 }
319 int xc_domain_set_time_offset(int xc_handle,
320 uint32_t domid,
321 int32_t time_offset_seconds)
322 {
323 DECLARE_DOMCTL;
324 domctl.cmd = XEN_DOMCTL_settimeoffset;
325 domctl.domain = (domid_t)domid;
326 domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds;
327 return do_domctl(xc_handle, &domctl);
328 }
330 int xc_domain_memory_increase_reservation(int xc_handle,
331 uint32_t domid,
332 unsigned long nr_extents,
333 unsigned int extent_order,
334 unsigned int address_bits,
335 xen_pfn_t *extent_start)
336 {
337 int err;
338 struct xen_memory_reservation reservation = {
339 .nr_extents = nr_extents,
340 .extent_order = extent_order,
341 .address_bits = address_bits,
342 .domid = domid
343 };
345 /* may be NULL */
346 set_xen_guest_handle(reservation.extent_start, extent_start);
348 err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
349 if ( err == nr_extents )
350 return 0;
352 if ( err >= 0 )
353 {
354 DPRINTF("Failed allocation for dom %d: "
355 "%ld extents of order %d, addr_bits %d\n",
356 domid, nr_extents, extent_order, address_bits);
357 errno = ENOMEM;
358 err = -1;
359 }
361 return err;
362 }
364 int xc_domain_memory_decrease_reservation(int xc_handle,
365 uint32_t domid,
366 unsigned long nr_extents,
367 unsigned int extent_order,
368 xen_pfn_t *extent_start)
369 {
370 int err;
371 struct xen_memory_reservation reservation = {
372 .nr_extents = nr_extents,
373 .extent_order = extent_order,
374 .address_bits = 0,
375 .domid = domid
376 };
378 set_xen_guest_handle(reservation.extent_start, extent_start);
380 if ( extent_start == NULL )
381 {
382 DPRINTF("decrease_reservation extent_start is NULL!\n");
383 errno = EINVAL;
384 return -1;
385 }
387 err = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation);
388 if ( err == nr_extents )
389 return 0;
391 if ( err >= 0 )
392 {
393 DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n",
394 domid, nr_extents, extent_order);
395 errno = EINVAL;
396 err = -1;
397 }
399 return err;
400 }
402 int xc_domain_memory_populate_physmap(int xc_handle,
403 uint32_t domid,
404 unsigned long nr_extents,
405 unsigned int extent_order,
406 unsigned int address_bits,
407 xen_pfn_t *extent_start)
408 {
409 int err;
410 struct xen_memory_reservation reservation = {
411 .nr_extents = nr_extents,
412 .extent_order = extent_order,
413 .address_bits = address_bits,
414 .domid = domid
415 };
416 set_xen_guest_handle(reservation.extent_start, extent_start);
418 err = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation);
419 if ( err == nr_extents )
420 return 0;
422 if ( err >= 0 )
423 {
424 DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
425 domid, nr_extents, extent_order);
426 errno = EBUSY;
427 err = -1;
428 }
430 return err;
431 }
433 int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max)
434 {
435 DECLARE_DOMCTL;
436 domctl.cmd = XEN_DOMCTL_max_vcpus;
437 domctl.domain = (domid_t)domid;
438 domctl.u.max_vcpus.max = max;
439 return do_domctl(xc_handle, &domctl);
440 }
442 int xc_domain_sethandle(int xc_handle, uint32_t domid,
443 xen_domain_handle_t handle)
444 {
445 DECLARE_DOMCTL;
446 domctl.cmd = XEN_DOMCTL_setdomainhandle;
447 domctl.domain = (domid_t)domid;
448 memcpy(domctl.u.setdomainhandle.handle, handle,
449 sizeof(xen_domain_handle_t));
450 return do_domctl(xc_handle, &domctl);
451 }
453 int xc_vcpu_getinfo(int xc_handle,
454 uint32_t domid,
455 uint32_t vcpu,
456 xc_vcpuinfo_t *info)
457 {
458 int rc;
459 DECLARE_DOMCTL;
461 domctl.cmd = XEN_DOMCTL_getvcpuinfo;
462 domctl.domain = (domid_t)domid;
463 domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
465 rc = do_domctl(xc_handle, &domctl);
467 memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));
469 return rc;
470 }
472 int xc_domain_ioport_permission(int xc_handle,
473 uint32_t domid,
474 uint32_t first_port,
475 uint32_t nr_ports,
476 uint32_t allow_access)
477 {
478 DECLARE_DOMCTL;
480 domctl.cmd = XEN_DOMCTL_ioport_permission;
481 domctl.domain = (domid_t)domid;
482 domctl.u.ioport_permission.first_port = first_port;
483 domctl.u.ioport_permission.nr_ports = nr_ports;
484 domctl.u.ioport_permission.allow_access = allow_access;
486 return do_domctl(xc_handle, &domctl);
487 }
489 int xc_vcpu_setcontext(int xc_handle,
490 uint32_t domid,
491 uint32_t vcpu,
492 vcpu_guest_context_t *ctxt)
493 {
494 DECLARE_DOMCTL;
495 int rc;
497 domctl.cmd = XEN_DOMCTL_setvcpucontext;
498 domctl.domain = domid;
499 domctl.u.vcpucontext.vcpu = vcpu;
500 set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
502 if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 )
503 return rc;
505 rc = do_domctl(xc_handle, &domctl);
507 unlock_pages(ctxt, sizeof(*ctxt));
509 return rc;
511 }
513 int xc_domain_irq_permission(int xc_handle,
514 uint32_t domid,
515 uint8_t pirq,
516 uint8_t allow_access)
517 {
518 DECLARE_DOMCTL;
520 domctl.cmd = XEN_DOMCTL_irq_permission;
521 domctl.domain = domid;
522 domctl.u.irq_permission.pirq = pirq;
523 domctl.u.irq_permission.allow_access = allow_access;
525 return do_domctl(xc_handle, &domctl);
526 }
528 int xc_domain_iomem_permission(int xc_handle,
529 uint32_t domid,
530 unsigned long first_mfn,
531 unsigned long nr_mfns,
532 uint8_t allow_access)
533 {
534 DECLARE_DOMCTL;
536 domctl.cmd = XEN_DOMCTL_iomem_permission;
537 domctl.domain = domid;
538 domctl.u.iomem_permission.first_mfn = first_mfn;
539 domctl.u.iomem_permission.nr_mfns = nr_mfns;
540 domctl.u.iomem_permission.allow_access = allow_access;
542 return do_domctl(xc_handle, &domctl);
543 }
545 /*
546 * Local variables:
547 * mode: C
548 * c-set-style: "BSD"
549 * c-basic-offset: 4
550 * tab-width: 4
551 * indent-tabs-mode: nil
552 * End:
553 */