ia64/xen-unstable

view tools/libxc/xc_ptrace_core.c @ 6385:f34e732ed4bf

Xenstore testsuite robustness: save output rather than rerun on failure.
"make check" reruns a test which fails with more verbosity. If the test
fails intermittently, that doesn't work well: save the output and simply
dump it if the test fails.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Aug 23 19:58:59 2005 +0000 (2005-08-23)
parents 48dd03e4b388
children 619e3d6f01b3 3133e64d0462 06d84bf87159
line source
1 #include <sys/ptrace.h>
2 #include <sys/wait.h>
3 #include "xc_private.h"
4 #include <time.h>
/* Low bits of an address within a page (assumes PAGE_SIZE is a power of 2). */
#define BSD_PAGE_MASK (PAGE_SIZE-1)
/* Mask selecting the page-frame bits of an address.
 * Fix: the original definition was missing its closing parenthesis, which
 * made any use of PG_FRAME a syntax error. */
#define PG_FRAME (~((unsigned long)BSD_PAGE_MASK))
/* Page-directory shift for 32-bit x86, non-PAE: PDE covers bits 31..22. */
#define PDRSHIFT 22
#define PSL_T 0x00000100 /* trace enable bit */
#define VCPU 0 /* XXX hard-coded vcpu; callers always debug vcpu 0 */
14 /*
15 * long
16 * ptrace(enum __ptrace_request request, pid_t pid, void *addr, void *data);
17 */
/*
 * Register block in the layout gdb expects from ptrace on 32-bit x86.
 * Field order is the wire/ABI contract (byte offsets in the comments);
 * do not reorder.  NOTE(review): offsets match the i386 user_regs_struct
 * layout — confirm against the gdb stub that consumes this.
 */
struct gdb_regs {
    long ebx; /* 0 */
    long ecx; /* 4 */
    long edx; /* 8 */
    long esi; /* 12 */
    long edi; /* 16 */
    long ebp; /* 20 */
    long eax; /* 24 */
    int xds; /* 28 */
    int xes; /* 32 */
    int xfs; /* 36 */
    int xgs; /* 40 */
    long orig_eax; /* 44 */
    long eip; /* 48 */
    int xcs; /* 52 */
    long eflags; /* 56 */
    long esp; /* 60 */
    int xss; /* 64 */
};
/* Debug helper: print an expression's name and value in hex.
 * Fix: dropped the trailing semicolon from the macro body so that
 * "printval(x);" is a single statement and is safe in if/else. */
#define printval(x) printf("%s = %lx\n", #x, (long)(x))

/*
 * Copy the vcpu user-register context (xc) into a gdb_regs block (pt).
 * Fix: wrapped in do { } while (0) so the macro behaves as one statement
 * in unbraced if/else, and parenthesised both arguments against
 * operator-precedence surprises.
 */
#define SET_PT_REGS(pt, xc)       \
do {                              \
    (pt).ebx = (xc).ebx;          \
    (pt).ecx = (xc).ecx;          \
    (pt).edx = (xc).edx;          \
    (pt).esi = (xc).esi;          \
    (pt).edi = (xc).edi;          \
    (pt).ebp = (xc).ebp;          \
    (pt).eax = (xc).eax;          \
    (pt).eip = (xc).eip;          \
    (pt).xcs = (xc).cs;           \
    (pt).eflags = (xc).eflags;    \
    (pt).esp = (xc).esp;          \
    (pt).xss = (xc).ss;           \
    (pt).xes = (xc).es;           \
    (pt).xds = (xc).ds;           \
    (pt).xfs = (xc).fs;           \
    (pt).xgs = (xc).gs;           \
} while (0)
/*
 * Copy a gdb_regs block (pt, passed as a pointer) back into the vcpu
 * user-register context (xc).  Inverse of SET_PT_REGS.
 * Fix: wrapped in do { } while (0) so the macro behaves as one statement
 * in unbraced if/else, and parenthesised both arguments.
 */
#define SET_XC_REGS(pt, xc)       \
do {                              \
    (xc).ebx = (pt)->ebx;         \
    (xc).ecx = (pt)->ecx;         \
    (xc).edx = (pt)->edx;         \
    (xc).esi = (pt)->esi;         \
    (xc).edi = (pt)->edi;         \
    (xc).ebp = (pt)->ebp;         \
    (xc).eax = (pt)->eax;         \
    (xc).eip = (pt)->eip;         \
    (xc).cs = (pt)->xcs;          \
    (xc).eflags = (pt)->eflags;   \
    (xc).esp = (pt)->esp;         \
    (xc).ss = (pt)->xss;          \
    (xc).es = (pt)->xes;          \
    (xc).ds = (pt)->xds;          \
    (xc).fs = (pt)->xfs;          \
    (xc).gs = (pt)->xgs;          \
} while (0)
/* Virtual address -> page-directory index (bits 31..22, 32-bit non-PAE). */
#define vtopdi(va) ((va) >> PDRSHIFT)
/* Virtual address -> page-table index; the 0x3ff mask keeps 10 bits,
 * consistent with PDRSHIFT 22 (assumes PAGE_SHIFT == 12). */
#define vtopti(va) (((va) >> PAGE_SHIFT) & 0x3ff)
/* XXX application state */
/* All of this is filled in lazily by xc_waitdomain_core() from the core
 * image read off domfd, and consumed by map_domain_va(). */
static long nr_pages = 0;               /* page count from core header; 0 => not yet initialised */
static unsigned long *p2m_array = NULL; /* pseudo-phys -> machine frame table read from the core file */
static unsigned long *m2p_array = NULL; /* inverse (machine -> pseudo-phys) built from p2m_array */
static unsigned long pages_offset;      /* file offset of the first saved page */
static unsigned long cr3[MAX_VIRT_CPUS];          /* per-vcpu page-directory base (ctrlreg[3]) */
static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];  /* saved vcpu contexts from the core image */
95 /* --------------------- */
97 static unsigned long
98 map_mtop_offset(unsigned long ma)
99 {
100 return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT);
101 }
104 static void *
105 map_domain_va(unsigned long domfd, int cpu, void * guest_va)
106 {
107 unsigned long pde, page;
108 unsigned long va = (unsigned long)guest_va;
109 void *v;
111 static unsigned long cr3_phys[MAX_VIRT_CPUS];
112 static unsigned long *cr3_virt[MAX_VIRT_CPUS];
113 static unsigned long pde_phys[MAX_VIRT_CPUS];
114 static unsigned long *pde_virt[MAX_VIRT_CPUS];
115 static unsigned long page_phys[MAX_VIRT_CPUS];
116 static unsigned long *page_virt[MAX_VIRT_CPUS];
118 if (cr3[cpu] != cr3_phys[cpu])
119 {
120 cr3_phys[cpu] = cr3[cpu];
121 if (cr3_virt[cpu])
122 munmap(cr3_virt[cpu], PAGE_SIZE);
123 v = mmap(
124 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
125 map_mtop_offset(cr3_phys[cpu]));
126 if (v == MAP_FAILED)
127 {
128 perror("mmap failed");
129 goto error_out;
130 }
131 cr3_virt[cpu] = v;
132 }
133 if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
134 goto error_out;
135 if (ctxt[cpu].flags & VGCF_VMX_GUEST)
136 pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
137 if (pde != pde_phys[cpu])
138 {
139 pde_phys[cpu] = pde;
140 if (pde_virt[cpu])
141 munmap(pde_virt[cpu], PAGE_SIZE);
142 v = mmap(
143 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
144 map_mtop_offset(pde_phys[cpu]));
145 if (v == MAP_FAILED)
146 goto error_out;
147 pde_virt[cpu] = v;
148 }
149 if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
150 goto error_out;
151 if (ctxt[cpu].flags & VGCF_VMX_GUEST)
152 page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
153 if (page != page_phys[cpu])
154 {
155 page_phys[cpu] = page;
156 if (page_virt[cpu])
157 munmap(page_virt[cpu], PAGE_SIZE);
158 v = mmap(
159 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
160 map_mtop_offset(page_phys[cpu]));
161 if (v == MAP_FAILED) {
162 printf("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, vtopti(va));
163 page_phys[cpu] = 0;
164 goto error_out;
165 }
166 page_virt[cpu] = v;
167 }
168 return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
170 error_out:
171 return 0;
172 }
174 int
175 xc_waitdomain_core(int domfd, int *status, int options)
176 {
177 int retval = -1;
178 int nr_vcpus;
179 int i;
180 xc_core_header_t header;
182 if (nr_pages == 0) {
184 if (read(domfd, &header, sizeof(header)) != sizeof(header))
185 return -1;
187 nr_pages = header.xch_nr_pages;
188 nr_vcpus = header.xch_nr_vcpus;
189 pages_offset = header.xch_pages_offset;
191 if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
192 sizeof(vcpu_guest_context_t)*nr_vcpus)
193 return -1;
195 for (i = 0; i < nr_vcpus; i++) {
196 cr3[i] = ctxt[i].ctrlreg[3];
197 }
198 if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
199 printf("Could not allocate p2m_array\n");
200 goto error_out;
201 }
202 if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
203 sizeof(unsigned long)*nr_pages)
204 return -1;
206 if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL) {
207 printf("Could not allocate m2p array\n");
208 goto error_out;
209 }
210 bzero(m2p_array, sizeof(unsigned long)* 1 << 20);
212 for (i = 0; i < nr_pages; i++) {
213 m2p_array[p2m_array[i]] = i;
214 }
216 }
217 retval = 0;
218 error_out:
219 return retval;
221 }
223 long
224 xc_ptrace_core(enum __ptrace_request request, u32 domfd, long eaddr, long edata)
225 {
226 int status = 0;
227 struct gdb_regs pt;
228 long retval = 0;
229 unsigned long *guest_va;
230 int cpu = VCPU;
231 void *addr = (char *)eaddr;
232 void *data = (char *)edata;
234 #if 0
235 printf("%20s %d, %p, %p \n", ptrace_names[request], domid, addr, data);
236 #endif
237 switch (request) {
238 case PTRACE_PEEKTEXT:
239 case PTRACE_PEEKDATA:
240 if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) {
241 status = EFAULT;
242 goto error_out;
243 }
245 retval = *guest_va;
246 break;
247 case PTRACE_POKETEXT:
248 case PTRACE_POKEDATA:
249 if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) {
250 status = EFAULT;
251 goto error_out;
252 }
253 *guest_va = (unsigned long)data;
254 break;
255 case PTRACE_GETREGS:
256 case PTRACE_GETFPREGS:
257 case PTRACE_GETFPXREGS:
258 if (request == PTRACE_GETREGS) {
259 SET_PT_REGS(pt, ctxt[cpu].user_regs);
260 memcpy(data, &pt, sizeof(struct gdb_regs));
261 } else if (request == PTRACE_GETFPREGS)
262 memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
263 else /*if (request == PTRACE_GETFPXREGS)*/
264 memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
265 break;
266 case PTRACE_ATTACH:
267 retval = 0;
268 break;
269 case PTRACE_SETREGS:
270 case PTRACE_SINGLESTEP:
271 case PTRACE_CONT:
272 case PTRACE_DETACH:
273 case PTRACE_SETFPREGS:
274 case PTRACE_SETFPXREGS:
275 case PTRACE_PEEKUSER:
276 case PTRACE_POKEUSER:
277 case PTRACE_SYSCALL:
278 case PTRACE_KILL:
279 #ifdef DEBUG
280 printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
281 #endif
282 status = ENOSYS;
283 break;
284 case PTRACE_TRACEME:
285 printf("PTRACE_TRACEME is an invalid request under Xen\n");
286 status = EINVAL;
287 }
289 if (status) {
290 errno = status;
291 retval = -1;
292 }
293 error_out:
294 return retval;
295 }