ia64/xen-unstable

view tools/libxc/xc_misc.c @ 17571:b6aa55ca599e

shadow: track video RAM dirty bits

This adds a new HVM op that enables tracking the dirty bits of a range of
video RAM. The idea is to optimize for the most common case (a single
guest mapping, occasionally with some temporary extra mappings), which
makes it possible to keep the shadow-mode overhead as low as possible.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 02 15:08:27 2008 +0100
parents 3bb94bb35dad
children 469d9b00382d
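
For context, here is a minimal sketch of how a display front end might consume the new xc_hvm_track_dirty_vram() call defined below. The domain ID, VRAM start PFN, page count, and the nr == 0 teardown convention are illustrative assumptions, not values taken from this changeset.

/* Hypothetical caller of xc_hvm_track_dirty_vram(); the domain ID and the
 * VRAM location/size are made-up example values. */
#include <stdio.h>
#include <string.h>
#include <xenctrl.h>

#define VRAM_PAGES 2048                         /* e.g. 8MB of VRAM in 4kB pages */
#define UL_BITS    (sizeof(unsigned long) * 8)  /* bits per bitmap word */

int main(void)
{
    int xc = xc_interface_open();               /* libxc handle (an int in this era) */
    domid_t domid = 1;                          /* assumed HVM guest */
    uint64_t vram_first_pfn = 0xf0000;          /* assumed start of the VRAM range */
    unsigned long bitmap[(VRAM_PAGES + UL_BITS - 1) / UL_BITS];
    int rc, i;

    if ( xc < 0 )
        return 1;

    memset(bitmap, 0, sizeof(bitmap));

    /* One bit per guest page; a set bit marks a page written since the
     * previous call, so polling this periodically yields the dirty VRAM. */
    rc = xc_hvm_track_dirty_vram(xc, domid, vram_first_pfn, VRAM_PAGES, bitmap);
    if ( rc == 0 )
        for ( i = 0; i < VRAM_PAGES; i++ )
            if ( bitmap[i / UL_BITS] & (1UL << (i % UL_BITS)) )
                printf("page %d is dirty\n", i);

    /* Assumption: a call with nr == 0 switches tracking back off. */
    xc_hvm_track_dirty_vram(xc, domid, 0, 0, NULL);

    xc_interface_close(xc);
    return rc ? 1 : 0;
}

Repainting only the pages flagged in the bitmap is what spares the device model from rescanning the whole framebuffer on every refresh.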
line source
/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 */

#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    DECLARE_SYSCTL;
    char *buffer = *pbuffer;
    unsigned int nr_chars = *pnr_chars;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
        return ret;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    unlock_pages(buffer, nr_chars);

    return ret;
}

int xc_send_debug_keys(int xc_handle, char *keys)
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    if ( (ret = lock_pages(keys, len)) != 0 )
        return ret;

    ret = do_sysctl(xc_handle, &sysctl);

    unlock_pages(keys, len);

    return ret;
}

int xc_physinfo(int xc_handle,
                xc_physinfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_physinfo;

    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_sched_id(int xc_handle,
                int *sched_id)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

int xc_perfc_control(int xc_handle,
                     uint32_t opcode,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = opcode;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    rc = do_sysctl(xc_handle, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_getcpuinfo(int xc_handle, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
        return rc;

    rc = do_sysctl(xc_handle, &sysctl);

    unlock_pages(info, max_cpus*sizeof(*info));

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_intx_level arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_intx_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.domain = domain;
    arg.bus = bus;
    arg.device = device;
    arg.intx = intx;
    arg.level = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_isa_irq_level arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_isa_irq_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.isa_irq = isa_irq;
    arg.level = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_link_route arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_link_route;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.link = link;
    arg.isa_irq = isa_irq;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_track_dirty_vram(
    int xc_handle, domid_t dom,
    uint64_t first_pfn, uint64_t nr,
    unsigned long *dirty_bitmap)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_track_dirty_vram arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_track_dirty_vram;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.first_pfn = first_pfn;
    arg.nr = nr;
    set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
                           const xen_pfn_t *arr, int num)
{
    xen_pfn_t *pfn;
    void *res;
    int i;

    pfn = malloc(num * sizeof(*pfn));
    if (!pfn)
        return NULL;
    memcpy(pfn, arr, num * sizeof(*pfn));

    res = xc_map_foreign_batch(xc_handle, dom, prot, pfn, num);
    if (res) {
        for (i = 0; i < num; i++) {
            if ((pfn[i] & 0xF0000000UL) == 0xF0000000UL) {
                /*
                 * xc_map_foreign_batch() doesn't give us an error
                 * code, so we have to make one up.  May not be the
                 * appropriate one.
                 */
                errno = EINVAL;
                munmap(res, num * PAGE_SIZE);
                res = NULL;
                break;
            }
        }
    }

    free(pfn);
    return res;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */