ia64/xen-unstable

view tools/libxc/xc_private.c @ 1820:3d4f8eb89670

bitkeeper revision 1.1106.1.2 (40faa780dekT3E5arFwcbQDu1MbX6g)

Cleaned up Xen's instruction emulator.
author kaf24@scramble.cl.cam.ac.uk
date Sun Jul 18 16:38:24 2004 +0000 (2004-07-18)
parents 7ee821f4caea
children a989641f2755 bd1640d9d7d4 994a7468bb63 0a4b76b6b5a0
/******************************************************************************
 * xc_private.c
 *
 * Helper functions for the rest of the library.
 */

#include "xc_private.h"
#define MAX_EXTENTS 8

/* Per-handle state for the pfn mapper: the /dev/mem fd plus the extents
 * currently mapped through it, so they can be torn down on close. */
typedef struct {
    int fd;
    struct {
        void         *base;
        unsigned long length;
    } extent[MAX_EXTENTS];
} mapper_desc_t;
void *init_pfn_mapper(domid_t domid)
{
    int fd = open("/dev/mem", O_RDWR);
    mapper_desc_t *desc;

    if ( fd < 0 )
        return NULL;

    if ( (desc = malloc(sizeof(*desc))) == NULL )
    {
        close(fd);
        return NULL;
    }

    /* Driver-specific ioctl: select the domain that subsequent maps refer to. */
    (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);

    memset(desc, 0, sizeof(*desc));
    desc->fd = fd;

    return desc;
}
int close_pfn_mapper(void *pm_handle)
{
    mapper_desc_t *desc = pm_handle;
    int i;

    /* Unmap any extents that are still outstanding. */
    for ( i = 0; i < MAX_EXTENTS; i++ )
    {
        if ( desc->extent[i].base != NULL )
            (void)munmap(desc->extent[i].base, desc->extent[i].length);
    }

    close(desc->fd);
    free(desc);

    return 0;
}
static int get_free_offset(mapper_desc_t *desc)
{
    int i;

    for ( i = 0; i < MAX_EXTENTS; i++ )
    {
        if ( desc->extent[i].base == NULL )
            break;
    }

    if ( i == MAX_EXTENTS )
    {
        fprintf(stderr, "Extent overflow in map_pfn_*()!\n");
        fflush(stderr);
        abort(); /* XXX: no free extent slots -- should return an error */
    }

    return i;
}
void *map_pfn_writeable(void *pm_handle, unsigned long pfn)
{
    mapper_desc_t *desc = pm_handle;
    void *vaddr;
    int off;

    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
    if ( vaddr == MAP_FAILED )
        return NULL;

    off = get_free_offset(desc);
    desc->extent[off].base   = vaddr;
    desc->extent[off].length = PAGE_SIZE;

    return vaddr;
}
void *map_pfn_readonly(void *pm_handle, unsigned long pfn)
{
    mapper_desc_t *desc = pm_handle;
    void *vaddr;
    int off;

    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
    if ( vaddr == MAP_FAILED )
        return NULL;

    off = get_free_offset(desc);
    desc->extent[off].base   = vaddr;
    desc->extent[off].length = PAGE_SIZE;

    return vaddr;
}
void unmap_pfn(void *pm_handle, void *vaddr)
{
    mapper_desc_t *desc = pm_handle;
    int i;
    unsigned long len = 0;

    for ( i = 0; i < MAX_EXTENTS; i++ )
    {
        if ( desc->extent[i].base == vaddr )
        {
            desc->extent[i].base = NULL;
            len = desc->extent[i].length;
        }
    }

    if ( len == 0 )
        abort(); /* XXX: vaddr was not returned by map_pfn_*() */

    (void)munmap(vaddr, len);
}
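
/*
 * Illustrative sketch (not part of the library interface): map one frame of
 * domain 'dom' read-write with the pfn mapper above, zero it, and tear the
 * mapping down again. 'pfn' is assumed to be a valid frame of that domain.
 */
static int zero_one_pfn(domid_t dom, unsigned long pfn)
{
    void *pm, *page;

    if ( (pm = init_pfn_mapper(dom)) == NULL )
        return -1;

    if ( (page = map_pfn_writeable(pm, pfn)) == NULL )
    {
        (void)close_pfn_mapper(pm);
        return -1;
    }

    memset(page, 0, PAGE_SIZE);

    unmap_pfn(pm, page);
    return close_pfn_mapper(pm);
}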
/*******************/

void *mfn_mapper_map_batch(int xc_handle, domid_t dom, int prot,
                           unsigned long *arr, int num)
{
    privcmd_mmapbatch_t ioctlx;
    void *addr;

    addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
    if ( addr == MAP_FAILED )
        return NULL;

    ioctlx.num  = num;
    ioctlx.dom  = dom;
    ioctlx.addr = (unsigned long)addr;
    ioctlx.arr  = arr;
    if ( ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx) < 0 )
    {
        perror("IOCTL_PRIVCMD_MMAPBATCH failed");
        munmap(addr, num*PAGE_SIZE);
        return NULL;
    }

    return addr;
}
/*******************/

void *mfn_mapper_map_single(int xc_handle, domid_t dom,
                            int size, int prot,
                            unsigned long mfn)
{
    privcmd_mmap_t ioctlx;
    privcmd_mmap_entry_t entry;
    void *addr;

    addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
    if ( addr == MAP_FAILED )
        return NULL;

    ioctlx.num   = 1;
    ioctlx.dom   = dom;
    ioctlx.entry = &entry;
    entry.va     = (unsigned long)addr;
    entry.mfn    = mfn;
    entry.npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    if ( ioctl(xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx) < 0 )
    {
        munmap(addr, size);
        return NULL;
    }

    return addr;
}
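
/*
 * Illustrative sketch (the helper name and its arguments are not part of the
 * library): map 'num' machine frames of domain 'dom', read-only and
 * virtually contiguous. 'xc_handle' is assumed to be an open privcmd file
 * descriptor and the entries of 'mfns' to be frames owned by that domain.
 */
static void *map_frames_readonly(int xc_handle, domid_t dom,
                                 unsigned long *mfns, int num)
{
    void *base = mfn_mapper_map_batch(xc_handle, dom, PROT_READ, mfns, num);

    if ( base == NULL )
        return NULL;

    /* Caller unmaps with munmap(base, num*PAGE_SIZE) when finished. */
    return base;
}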
/*******************/

/* NB: arr must be mlock'ed */
int get_pfn_type_batch(int xc_handle,
                       u32 dom, int num, unsigned long *arr)
{
    dom0_op_t op;
    op.cmd = DOM0_GETPAGEFRAMEINFO2;
    op.u.getpageframeinfo2.domain = (domid_t)dom;
    op.u.getpageframeinfo2.num    = num;
    op.u.getpageframeinfo2.array  = arr;
    return do_dom0_op(xc_handle, &op);
}
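
/*
 * Sketch of the mlock rule stated above get_pfn_type_batch(): pin the array
 * for the duration of the call and unpin it afterwards. The helper name is
 * illustrative only; 'xc_handle' is assumed to be an open privcmd fd.
 */
static int get_pfn_types_locked(int xc_handle, u32 dom,
                                int num, unsigned long *arr)
{
    int rc;

    if ( mlock(arr, num * sizeof(*arr)) != 0 )
        return -1;

    rc = get_pfn_type_batch(xc_handle, dom, num, arr);

    (void)munlock(arr, num * sizeof(*arr));
    return rc;
}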
#define GETPFN_ERR (~0U)
unsigned int get_pfn_type(int xc_handle,
                          unsigned long mfn,
                          u32 dom)
{
    dom0_op_t op;
    op.cmd = DOM0_GETPAGEFRAMEINFO;
    op.u.getpageframeinfo.pfn    = mfn;
    op.u.getpageframeinfo.domain = (domid_t)dom;
    if ( do_dom0_op(xc_handle, &op) < 0 )
    {
        PERROR("Unexpected failure when getting page frame info!");
        return GETPFN_ERR;
    }
    return op.u.getpageframeinfo.type;
}
/*******************/

#define FIRST_MMU_UPDATE 1

static int flush_mmu_updates(int xc_handle, mmu_t *mmu)
{
    int err = 0;
    privcmd_hypercall_t hypercall;

    if ( mmu->idx == FIRST_MMU_UPDATE )
        return 0;

    /* The first request sets the correct subject domain (both PTS and GPS). */
    mmu->updates[0].val  = (unsigned long)(mmu->subject<<16) & ~0xFFFFUL;
    mmu->updates[0].ptr  = (unsigned long)(mmu->subject<< 0) & ~0xFFFFUL;
    mmu->updates[0].ptr |= MMU_EXTENDED_COMMAND;
    mmu->updates[0].val |= MMUEXT_SET_SUBJECTDOM | SET_PAGETABLE_SUBJECTDOM;

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;

    if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        PERROR("Could not lock pagetable update array");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = FIRST_MMU_UPDATE;

    (void)munlock(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
mmu_t *init_mmu_updates(int xc_handle, domid_t dom)
{
    mmu_t *mmu = malloc(sizeof(mmu_t));
    if ( mmu == NULL )
        return mmu;
    mmu->idx     = FIRST_MMU_UPDATE;
    mmu->subject = dom;
    return mmu;
}

int add_mmu_update(int xc_handle, mmu_t *mmu,
                   unsigned long ptr, unsigned long val)
{
    mmu->updates[mmu->idx].ptr = ptr;
    mmu->updates[mmu->idx].val = val;

    /* Flush automatically when the batch buffer fills up. */
    if ( ++mmu->idx == MAX_MMU_UPDATES )
        return flush_mmu_updates(xc_handle, mmu);

    return 0;
}

int finish_mmu_updates(int xc_handle, mmu_t *mmu)
{
    return flush_mmu_updates(xc_handle, mmu);
}
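
/*
 * Illustrative sketch of how the batching interface above is meant to be
 * driven (the helper name, and the fact that the caller supplies ready-made
 * ptr/val encodings, are assumptions): queue every update and flush whatever
 * remains in the buffer at the end.
 */
static int push_mmu_updates(int xc_handle, domid_t dom,
                            unsigned long *ptrs, unsigned long *vals,
                            int count)
{
    int i, rc = 0;
    mmu_t *mmu = init_mmu_updates(xc_handle, dom);

    if ( mmu == NULL )
        return -1;

    /* add_mmu_update() flushes by itself whenever the buffer fills. */
    for ( i = 0; (i < count) && (rc == 0); i++ )
        rc = add_mmu_update(xc_handle, mmu, ptrs[i], vals[i]);

    /* Push out any requests still sitting in the buffer. */
    if ( finish_mmu_updates(xc_handle, mmu) != 0 )
        rc = -1;

    free(mmu);
    return rc;
}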
long long xc_domain_get_cpu_usage(int xc_handle, domid_t domid)
{
    dom0_op_t op;

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    op.u.getdomaininfo.ctxt   = NULL;
    if ( (do_dom0_op(xc_handle, &op) < 0) ||
         ((u32)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return op.u.getdomaininfo.cpu_time;
}
/* This is shared between save and restore, and may generally be useful. */
unsigned long csum_page(void *page)
{
    int i;
    unsigned long *p = page;
    unsigned long long sum = 0;

    for ( i = 0; i < (PAGE_SIZE/sizeof(unsigned long)); i++ )
        sum += p[i];

    /* Fold the 64-bit sum down into an unsigned long. */
    return sum ^ (sum >> 32);
}
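
/*
 * Illustrative sketch: the kind of sanity check that save/restore code can
 * build on csum_page(). 'dst' and 'src' are assumed to be PAGE_SIZE-sized
 * buffers; the helper itself is not part of the library.
 */
static int copy_and_verify_page(void *dst, void *src)
{
    memcpy(dst, src, PAGE_SIZE);
    return (csum_page(dst) == csum_page(src)) ? 0 : -1;
}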