ia64/xen-unstable

view tools/libxc/xc_private.c @ 6385:f34e732ed4bf

Xenstore testsuite robustness: save output rather than rerun on failure.
"make check" reruns a test which fails with more verbosity. If the test
fails intermittently, that doesn't work well: save the output and simply
dump it if the test fails.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Aug 23 19:58:59 2005 +0000 (2005-08-23)
parents f294acb25858
children fdfd511768a3 2f20c2fce2c5 e3d811cca4e1 1ae656509f02 23979fb12c49 84ee014ebd41 99914b54f7bf 81576d3d1ca8 3a8f27c6d56c cc5f88b719d0 fa0754a9f64f
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include <zlib.h>
8 #include "xc_private.h"
10 void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot,
11 unsigned long *arr, int num )
12 {
13 privcmd_mmapbatch_t ioctlx;
14 void *addr;
15 addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
16 if ( addr == MAP_FAILED )
17 return NULL;
19 ioctlx.num=num;
20 ioctlx.dom=dom;
21 ioctlx.addr=(unsigned long)addr;
22 ioctlx.arr=arr;
23 if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx ) < 0 )
24 {
25 int saved_errno = errno;
26 perror("XXXXXXXX");
27 (void)munmap(addr, num*PAGE_SIZE);
28 errno = saved_errno;
29 return NULL;
30 }
31 return addr;
33 }
35 /*******************/
37 void *xc_map_foreign_range(int xc_handle, u32 dom,
38 int size, int prot,
39 unsigned long mfn )
40 {
41 privcmd_mmap_t ioctlx;
42 privcmd_mmap_entry_t entry;
43 void *addr;
44 addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
45 if ( addr == MAP_FAILED )
46 return NULL;
48 ioctlx.num=1;
49 ioctlx.dom=dom;
50 ioctlx.entry=&entry;
51 entry.va=(unsigned long) addr;
52 entry.mfn=mfn;
53 entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
54 if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) < 0 )
55 {
56 int saved_errno = errno;
57 (void)munmap(addr, size);
58 errno = saved_errno;
59 return NULL;
60 }
61 return addr;
62 }
64 /*******************/
66 /* NB: arr must be mlock'ed */
67 int get_pfn_type_batch(int xc_handle,
68 u32 dom, int num, unsigned long *arr)
69 {
70 dom0_op_t op;
71 op.cmd = DOM0_GETPAGEFRAMEINFO2;
72 op.u.getpageframeinfo2.domain = (domid_t)dom;
73 op.u.getpageframeinfo2.num = num;
74 op.u.getpageframeinfo2.array = arr;
75 return do_dom0_op(xc_handle, &op);
76 }
78 #define GETPFN_ERR (~0U)
79 unsigned int get_pfn_type(int xc_handle,
80 unsigned long mfn,
81 u32 dom)
82 {
83 dom0_op_t op;
84 op.cmd = DOM0_GETPAGEFRAMEINFO;
85 op.u.getpageframeinfo.pfn = mfn;
86 op.u.getpageframeinfo.domain = (domid_t)dom;
87 if ( do_dom0_op(xc_handle, &op) < 0 )
88 {
89 PERROR("Unexpected failure when getting page frame info!");
90 return GETPFN_ERR;
91 }
92 return op.u.getpageframeinfo.type;
93 }
97 /*******************/
99 int pin_table(
100 int xc_handle, unsigned int type, unsigned long mfn, domid_t dom)
101 {
102 struct mmuext_op op;
104 op.cmd = type;
105 op.mfn = mfn;
107 if ( do_mmuext_op(xc_handle, &op, 1, dom) < 0 )
108 return 1;
110 return 0;
111 }
113 static int flush_mmu_updates(int xc_handle, mmu_t *mmu)
114 {
115 int err = 0;
116 privcmd_hypercall_t hypercall;
118 if ( mmu->idx == 0 )
119 return 0;
121 hypercall.op = __HYPERVISOR_mmu_update;
122 hypercall.arg[0] = (unsigned long)mmu->updates;
123 hypercall.arg[1] = (unsigned long)mmu->idx;
124 hypercall.arg[2] = 0;
125 hypercall.arg[3] = mmu->subject;
127 if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
128 {
129 PERROR("flush_mmu_updates: mmu updates mlock failed");
130 err = 1;
131 goto out;
132 }
134 if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
135 {
136 ERROR("Failure when submitting mmu updates");
137 err = 1;
138 }
140 mmu->idx = 0;
142 safe_munlock(mmu->updates, sizeof(mmu->updates));
144 out:
145 return err;
146 }
148 mmu_t *init_mmu_updates(int xc_handle, domid_t dom)
149 {
150 mmu_t *mmu = malloc(sizeof(mmu_t));
151 if ( mmu == NULL )
152 return mmu;
153 mmu->idx = 0;
154 mmu->subject = dom;
155 return mmu;
156 }
158 int add_mmu_update(int xc_handle, mmu_t *mmu,
159 unsigned long ptr, unsigned long val)
160 {
161 mmu->updates[mmu->idx].ptr = ptr;
162 mmu->updates[mmu->idx].val = val;
164 if ( ++mmu->idx == MAX_MMU_UPDATES )
165 return flush_mmu_updates(xc_handle, mmu);
167 return 0;
168 }
170 int finish_mmu_updates(int xc_handle, mmu_t *mmu)
171 {
172 return flush_mmu_updates(xc_handle, mmu);
173 }
176 long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
177 {
178 dom0_op_t op;
180 op.cmd = DOM0_GETVCPUCONTEXT;
181 op.u.getvcpucontext.domain = (domid_t)domid;
182 op.u.getvcpucontext.vcpu = (u16)vcpu;
183 op.u.getvcpucontext.ctxt = NULL;
184 if ( (do_dom0_op(xc_handle, &op) < 0) )
185 {
186 PERROR("Could not get info on domain");
187 return -1;
188 }
189 return op.u.getvcpucontext.cpu_time;
190 }
193 /* This is shared between save and restore, and may generally be useful. */
194 unsigned long csum_page (void * page)
195 {
196 int i;
197 unsigned long *p = page;
198 unsigned long long sum=0;
200 for ( i = 0; i < (PAGE_SIZE/sizeof(unsigned long)); i++ )
201 sum += p[i];
203 return sum ^ (sum>>32);
204 }
206 unsigned long xc_get_m2p_start_mfn ( int xc_handle )
207 {
208 unsigned long mfn;
210 if ( ioctl( xc_handle, IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN, &mfn ) < 0 )
211 {
212 perror("xc_get_m2p_start_mfn:");
213 return 0;
214 }
215 return mfn;
216 }
218 int xc_get_pfn_list(int xc_handle,
219 u32 domid,
220 unsigned long *pfn_buf,
221 unsigned long max_pfns)
222 {
223 dom0_op_t op;
224 int ret;
225 op.cmd = DOM0_GETMEMLIST;
226 op.u.getmemlist.domain = (domid_t)domid;
227 op.u.getmemlist.max_pfns = max_pfns;
228 op.u.getmemlist.buffer = pfn_buf;
231 if ( mlock(pfn_buf, max_pfns * sizeof(unsigned long)) != 0 )
232 {
233 PERROR("xc_get_pfn_list: pfn_buf mlock failed");
234 return -1;
235 }
237 ret = do_dom0_op(xc_handle, &op);
239 safe_munlock(pfn_buf, max_pfns * sizeof(unsigned long));
241 #if 0
242 #ifdef DEBUG
243 DPRINTF(("Ret for xc_get_pfn_list is %d\n", ret));
244 if (ret >= 0) {
245 int i, j;
246 for (i = 0; i < op.u.getmemlist.num_pfns; i += 16) {
247 fprintf(stderr, "0x%x: ", i);
248 for (j = 0; j < 16; j++)
249 fprintf(stderr, "0x%lx ", pfn_buf[i + j]);
250 fprintf(stderr, "\n");
251 }
252 }
253 #endif
254 #endif
256 return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
257 }
#ifdef __ia64__
/*
 * ia64 variant of xc_get_pfn_list: fetches nr_pages frame numbers
 * starting at start_page.  Returns the number of entries written, or
 * -1 on failure.
 */
int xc_ia64_get_pfn_list(int xc_handle,
                         u32 domid,
                         unsigned long *pfn_buf,
                         unsigned int start_page,
                         unsigned int nr_pages)
{
    dom0_op_t op;
    int ret;

    op.cmd = DOM0_GETMEMLIST;
    op.u.getmemlist.domain = (domid_t)domid;
    /* Start page is smuggled in the top half of max_pfns; relies on
     * unsigned long being 64-bit (always true on ia64). */
    op.u.getmemlist.max_pfns = ((unsigned long)start_page << 32) | nr_pages;
    op.u.getmemlist.buffer = pfn_buf;

    if ( mlock(pfn_buf, nr_pages * sizeof(unsigned long)) != 0 )
    {
        PERROR("Could not lock pfn list buffer");
        return -1;
    }

    /* XXX Hack to put pages in TLB, hypervisor should be able to handle this */
    memset(pfn_buf, 0, nr_pages * sizeof(unsigned long));
    ret = do_dom0_op(xc_handle, &op);

    /* Use safe_munlock for consistency with xc_get_pfn_list (preserves
     * errno across the unlock). */
    safe_munlock(pfn_buf, nr_pages * sizeof(unsigned long));

    return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
#endif
290 long xc_get_tot_pages(int xc_handle, u32 domid)
291 {
292 dom0_op_t op;
293 op.cmd = DOM0_GETDOMAININFO;
294 op.u.getdomaininfo.domain = (domid_t)domid;
295 return (do_dom0_op(xc_handle, &op) < 0) ?
296 -1 : op.u.getdomaininfo.tot_pages;
297 }
299 int xc_copy_to_domain_page(int xc_handle,
300 u32 domid,
301 unsigned long dst_pfn,
302 void *src_page)
303 {
304 void *vaddr = xc_map_foreign_range(
305 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
306 if ( vaddr == NULL )
307 return -1;
308 memcpy(vaddr, src_page, PAGE_SIZE);
309 munmap(vaddr, PAGE_SIZE);
310 return 0;
311 }
313 unsigned long xc_get_filesz(int fd)
314 {
315 u16 sig;
316 u32 _sz = 0;
317 unsigned long sz;
319 lseek(fd, 0, SEEK_SET);
320 if ( read(fd, &sig, sizeof(sig)) != sizeof(sig) )
321 return 0;
322 sz = lseek(fd, 0, SEEK_END);
323 if ( sig == 0x8b1f ) /* GZIP signature? */
324 {
325 lseek(fd, -4, SEEK_END);
326 if ( read(fd, &_sz, 4) != 4 )
327 return 0;
328 sz = _sz;
329 }
330 lseek(fd, 0, SEEK_SET);
332 return sz;
333 }
/*
 * Read (and transparently gunzip) a kernel image into a freshly
 * malloc'ed buffer.  On success returns the buffer (caller frees) and
 * stores the uncompressed size in *size; returns NULL on any failure.
 */
char *xc_read_kernel_image(const char *filename, unsigned long *size)
{
    int kernel_fd = -1;
    gzFile kernel_gfd = NULL;
    char *image = NULL;
    int bytes;   /* BUG FIX: was unsigned int, which hid gzread()'s -1 error */

    if ( (kernel_fd = open(filename, O_RDONLY)) < 0 )
    {
        PERROR("Could not open kernel image");
        goto out;
    }

    if ( (*size = xc_get_filesz(kernel_fd)) == 0 )
    {
        PERROR("Could not read kernel image");
        goto out;
    }

    if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL )
    {
        PERROR("Could not allocate decompression state for state file");
        goto out;
    }

    if ( (image = malloc(*size)) == NULL )
    {
        PERROR("Could not allocate memory for kernel image");
        goto out;
    }

    bytes = gzread(kernel_gfd, image, *size);
    if ( (bytes < 0) || ((unsigned long)bytes != *size) )
    {
        PERROR("Error reading kernel image, could not"
               " read the whole image (%d != %ld).", bytes, *size);
        free(image);
        image = NULL;
    }

 out:
    if ( kernel_gfd != NULL )
        gzclose(kernel_gfd);   /* gzclose() also closes the underlying fd */
    else if ( kernel_fd >= 0 )
        close(kernel_fd);
    return image;
}
382 void xc_map_memcpy(unsigned long dst, char *src, unsigned long size,
383 int xch, u32 dom, unsigned long *parray,
384 unsigned long vstart)
385 {
386 char *va;
387 unsigned long chunksz, done, pa;
389 for ( done = 0; done < size; done += chunksz )
390 {
391 pa = dst + done - vstart;
392 va = xc_map_foreign_range(
393 xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
394 chunksz = size - done;
395 if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
396 chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
397 memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
398 munmap(va, PAGE_SIZE);
399 }
400 }
402 int xc_dom0_op(int xc_handle, dom0_op_t *op)
403 {
404 return do_dom0_op(xc_handle, op);
405 }