ia64/xen-unstable

tools/libxc/xc_ptrace.c @ 10056:d056f91cfd95

Fix 64-bit build. xc_ptrace() is still broken for x86/64 however.
Signed-off-by: Keir Fraser <keir@xensource.com>
author   kaf24@firebug.cl.cam.ac.uk
date     Sun May 14 20:13:14 2006 +0100
parents  8e6835fa7c4b
children 1855124935e2
#define XC_PTRACE_PRIVATE

#include <sys/ptrace.h>
#include <sys/wait.h>
#include <time.h>

#include "xc_private.h"
#include "xg_private.h"
#include "xc_ptrace.h"
#ifdef DEBUG
static char *ptrace_names[] = {
    "PTRACE_TRACEME",
    "PTRACE_PEEKTEXT",
    "PTRACE_PEEKDATA",
    "PTRACE_PEEKUSER",
    "PTRACE_POKETEXT",
    "PTRACE_POKEDATA",
    "PTRACE_POKEUSER",
    "PTRACE_CONT",
    "PTRACE_KILL",
    "PTRACE_SINGLESTEP",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_GETREGS",
    "PTRACE_SETREGS",
    "PTRACE_GETFPREGS",
    "PTRACE_SETFPREGS",
    "PTRACE_ATTACH",
    "PTRACE_DETACH",
    "PTRACE_GETFPXREGS",
    "PTRACE_SETFPXREGS",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_SYSCALL",
};
#endif
static int current_domid = -1;
static int current_isfile;

static cpumap_t online_cpumap;
static cpumap_t regs_valid;
static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
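/*
 * FOREACH_CPU iterates over the set bits of the cached online_cpumap,
 * yielding 1-based vcpu indices via ffsll() and clearing each bit as it
 * goes; callers subtract one to get the vcpu number.
 */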
extern int ffsll(long long int);
#define FOREACH_CPU(cpumap, i)  for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (i - 1)) )
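/*
 * Fetch (and cache) the guest context for @cpu, refetching only when the
 * cached copy has been invalidated via regs_valid. If @online is non-NULL
 * it is also set from the vcpu's online flag.
 */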
static int
fetch_regs(int xc_handle, int cpu, int *online)
{
    xc_vcpuinfo_t info;
    int retval = 0;

    if (online)
        *online = 0;
    if ( !(regs_valid & (1 << cpu)) )
    {
        retval = xc_vcpu_getcontext(xc_handle, current_domid,
                                    cpu, &ctxt[cpu]);
        if ( retval )
            goto done;
        regs_valid |= (1 << cpu);
    }
    if ( online == NULL )
        goto done;

    retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
    *online = info.online;

 done:
    return retval;
}
static struct thr_ev_handlers {
    thr_ev_handler_t td_create;
    thr_ev_handler_t td_death;
} handlers;
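/*
 * Register a callback for thread (vcpu) creation or death events; these
 * are used to tell the debugger when vcpus come online or go offline.
 */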
void
xc_register_event_handler(thr_ev_handler_t h,
                          td_event_e e)
{
    switch (e) {
    case TD_CREATE:
        handlers.td_create = h;
        break;
    case TD_DEATH:
        handlers.td_death = h;
        break;
    default:
        abort(); /* XXX */
    }
}
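/* True when the vcpu is in protected mode with paging enabled
 * (CR0.PE and CR0.PG both set). */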
static inline int
paging_enabled(vcpu_guest_context_t *v)
{
    unsigned long cr0 = v->ctrlreg[0];
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}
/*
 * Fetch registers for all online CPUs and set the cpumap to indicate
 * which CPUs are online.
 */
static int
get_online_cpumap(int xc_handle, dom0_getdomaininfo_t *d, cpumap_t *cpumap)
{
    int i, online, retval;

    *cpumap = 0;
    for (i = 0; i <= d->max_vcpu_id; i++) {
        if ((retval = fetch_regs(xc_handle, i, &online)))
            return retval;
        if (online)
            *cpumap |= (1 << i);
    }

    return 0;
}
/*
 * Notify GDB of any VCPUs that have come online or gone offline, and
 * update online_cpumap accordingly.
 */
static void
online_vcpus_changed(cpumap_t cpumap)
{
    cpumap_t changed_cpumap = cpumap ^ online_cpumap;
    int index;

    while ( (index = ffsll(changed_cpumap)) ) {
        if ( cpumap & (1 << (index - 1)) )
        {
            if (handlers.td_create) handlers.td_create(index - 1);
        } else {
            printf("thread death: %d\n", index - 1);
            if (handlers.td_death) handlers.td_death(index - 1);
        }
        changed_cpumap &= ~(1 << (index - 1));
    }
    online_cpumap = cpumap;
}
/* --------------------- */
/* XXX application state */
static long           nr_pages   = 0;
static unsigned long *page_array = NULL;
/*
 * Translates physical addresses to machine addresses for HVM
 * guests. For paravirtual domains the function will just return the
 * given address.
 *
 * This function should be used when reading page directories/page
 * tables.
 */
static unsigned long
to_ma(int cpu,
      unsigned long in_addr)
{
    unsigned long maddr = in_addr;

    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
        maddr = page_array[maddr >> PAGE_SHIFT] << PAGE_SHIFT;
    return maddr;
}
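/*
 * Map a guest virtual address for a 2-level (non-PAE) x86 guest by walking
 * CR3, the page directory and the page table. The CR3, PDE and PTE
 * mappings are cached per vcpu and only remapped when they change.
 */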
static void *
map_domain_va_32(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;

    static unsigned long cr3_phys[MAX_VIRT_CPUS];
    static uint32_t *cr3_virt[MAX_VIRT_CPUS];
    static unsigned long pde_phys[MAX_VIRT_CPUS];
    static uint32_t *pde_virt[MAX_VIRT_CPUS];
    static unsigned long page_phys[MAX_VIRT_CPUS];
    static uint32_t *page_virt[MAX_VIRT_CPUS];
    static int prev_perm[MAX_VIRT_CPUS];

    if (ctxt[cpu].ctrlreg[3] == 0)
        return NULL;
    if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
    {
        cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
        if ( cr3_virt[cpu] )
            munmap(cr3_virt[cpu], PAGE_SIZE);
        cr3_virt[cpu] = xc_map_foreign_range(
            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
            cr3_phys[cpu] >> PAGE_SHIFT);
        if ( cr3_virt[cpu] == NULL )
            return NULL;
    }
    pde = to_ma(cpu, cr3_virt[cpu][vtopdi(va)]);
    if ( pde != pde_phys[cpu] )
    {
        pde_phys[cpu] = pde;
        if ( pde_virt[cpu] )
            munmap(pde_virt[cpu], PAGE_SIZE);
        pde_virt[cpu] = xc_map_foreign_range(
            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
            pde_phys[cpu] >> PAGE_SHIFT);
        if ( pde_virt[cpu] == NULL )
            return NULL;
    }
    page = to_ma(cpu, pde_virt[cpu][vtopti(va)]);

    if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
    {
        page_phys[cpu] = page;
        if ( page_virt[cpu] )
            munmap(page_virt[cpu], PAGE_SIZE);
        page_virt[cpu] = xc_map_foreign_range(
            xc_handle, current_domid, PAGE_SIZE, perm,
            page_phys[cpu] >> PAGE_SHIFT);
        if ( page_virt[cpu] == NULL )
        {
            page_phys[cpu] = 0;
            return NULL;
        }
        prev_perm[cpu] = perm;
    }

    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
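/*
 * Map a guest virtual address for a PAE guest by walking the 3-level
 * page tables (L3 -> L2 -> L1) and mapping the final frame with the
 * requested permissions.
 */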
static void *
map_domain_va_pae(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l2p, l1p, p, va = (unsigned long)guest_va;
    uint64_t *l3, *l2, *l1;
    static void *v;

    l3 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
    if ( l3 == NULL )
        return NULL;

    l2p = to_ma(cpu, l3[l3_table_offset_pae(va)]);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    munmap(l3, PAGE_SIZE);
    if ( l2 == NULL )
        return NULL;

    l1p = to_ma(cpu, l2[l2_table_offset_pae(va)]);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
    munmap(l2, PAGE_SIZE);
    if ( l1 == NULL )
        return NULL;

    p = to_ma(cpu, l1[l1_table_offset_pae(va)]);
    if ( v != NULL )
        munmap(v, PAGE_SIZE);
    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    munmap(l1, PAGE_SIZE);
    if ( v == NULL )
        return NULL;

    return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
}
#ifdef __x86_64__
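/*
 * Map a guest virtual address for a 64-bit guest by walking the 4-level
 * page tables (L4 -> L3 -> L2 -> L1), handling 2M superpages. Falls back
 * to the 32-bit walker when CR4.PAE is clear (legacy ia32 mode).
 */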
static void *
map_domain_va_64(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l3p, l2p, l1p, l1e, p, va = (unsigned long)guest_va;
    uint64_t *l4, *l3, *l2, *l1;
    static void *v;

    if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
        return map_domain_va_32(xc_handle, cpu, guest_va, perm);

    l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
                               PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
    if ( l4 == NULL )
        return NULL;

    l3p = to_ma(cpu, l4[l4_table_offset(va)]);
    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
    munmap(l4, PAGE_SIZE);
    if ( l3 == NULL )
        return NULL;

    l2p = to_ma(cpu, l3[l3_table_offset(va)]);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    munmap(l3, PAGE_SIZE);
    if ( l2 == NULL )
        return NULL;

    l1 = NULL;
    l1e = to_ma(cpu, l2[l2_table_offset(va)]);
    l1p = l1e >> PAGE_SHIFT;
    if (l1e & 0x80) { /* 2M pages */
        p = to_ma(cpu, (l1p + l1_table_offset(va)) << PAGE_SHIFT);
    } else { /* 4K pages */
        /* l1p already holds the L1 frame number, so pass it unshifted. */
        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
        munmap(l2, PAGE_SIZE);
        if ( l1 == NULL )
            return NULL;

        p = to_ma(cpu, l1[l1_table_offset(va)]);
    }
    if ( v != NULL )
        munmap(v, PAGE_SIZE);
    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if (l1)
        munmap(l1, PAGE_SIZE);
    if ( v == NULL )
        return NULL;

    return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
}
#endif
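/*
 * Top-level mapper: determine the guest paging mode from the hypervisor
 * capabilities string, (re)fetch the domain's page frame list when its
 * size changes, and dispatch to the 32-bit, PAE or 64-bit walker. If the
 * vcpu has paging disabled, the address is treated as physical and mapped
 * directly through the page frame list.
 */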
static void *
map_domain_va(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long va = (unsigned long) guest_va;
    long npgs = xc_get_tot_pages(xc_handle, current_domid);
    static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;

    if ( mode == MODE_UNKNOWN )
    {
        xen_capabilities_info_t caps;
        (void)xc_version(xc_handle, XENVER_capabilities, caps);
        if ( strstr(caps, "-x86_64") )
            mode = MODE_64;
        else if ( strstr(caps, "-x86_32p") )
            mode = MODE_PAE;
        else if ( strstr(caps, "-x86_32") )
            mode = MODE_32;
    }

    if ( nr_pages != npgs )
    {
        if ( nr_pages > 0 )
            free(page_array);
        nr_pages = npgs;
        if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
        {
            printf("Could not allocate memory\n");
            return NULL;
        }
        if ( xc_get_pfn_list(xc_handle, current_domid,
                             page_array, nr_pages) != nr_pages )
        {
            printf("Could not get the page frame list\n");
            return NULL;
        }
    }

    if (fetch_regs(xc_handle, cpu, NULL))
        return NULL;

    if (!paging_enabled(&ctxt[cpu])) {
        static void *v;
        unsigned long page;

        if ( v != NULL )
            munmap(v, PAGE_SIZE);

        page = to_ma(cpu, page_array[va >> PAGE_SHIFT]);

        v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
                                  perm, page >> PAGE_SHIFT);

        if ( v == NULL )
            return NULL;

        return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
    }
#ifdef __x86_64__
    if ( mode == MODE_64 )
        return map_domain_va_64(xc_handle, cpu, guest_va, perm);
#endif
    if ( mode == MODE_PAE )
        return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
    /* else ( mode == MODE_32 ) */
    return map_domain_va_32(xc_handle, cpu, guest_va, perm);
}
int control_c_pressed_flag = 0;
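/*
 * waitpid()-style helper for live domains: poll DOM0_GETDOMAININFO every
 * 10ms until the domain is paused (or WNOHANG/Ctrl-C short-circuits the
 * wait), then refresh the online vcpu map.
 */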
static int
__xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options)
{
    DECLARE_DOM0_OP;
    int retval;
    struct timespec ts;
    cpumap_t cpumap;

    ts.tv_sec = 0;
    ts.tv_nsec = 10*1000*1000;

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = domain;

 retry:
    retval = do_dom0_op(xc_handle, &op);
    if ( retval || (op.u.getdomaininfo.domain != domain) )
    {
        printf("getdomaininfo failed\n");
        goto done;
    }
    *status = op.u.getdomaininfo.flags;

    if ( options & WNOHANG )
        goto done;

    if (control_c_pressed_flag) {
        xc_domain_pause(xc_handle, domain);
        control_c_pressed_flag = 0;
        goto done;
    }

    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) )
    {
        nanosleep(&ts,NULL);
        goto retry;
    }
 done:
    if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
        printf("get_online_cpumap failed\n");
    if (online_cpumap != cpumap)
        online_vcpus_changed(cpumap);
    return retval;
}
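/*
 * ptrace()-compatible entry point used by the debugger front end. For
 * PTRACE_ATTACH, domid_tid selects the target domain (a non-zero edata
 * selects core-file debugging); for other requests it selects the vcpu.
 * Unsupported requests fail with ENOSYS, invalid ones with EINVAL.
 *
 * A typical live-debug sequence (illustrative sketch only, not taken from
 * a real caller) might look like:
 *
 *     xc_ptrace(xc_handle, PTRACE_ATTACH, domid, 0, 0);   // pause + enable debugging
 *     xc_waitdomain(xc_handle, domid, &status, 0);        // wait until paused
 *     xc_ptrace(xc_handle, PTRACE_GETREGS, vcpu, 0, (long)&regs);
 *     xc_ptrace(xc_handle, PTRACE_SINGLESTEP, vcpu, 0, 0);
 *     xc_ptrace(xc_handle, PTRACE_DETACH, vcpu, 0, 0);
 */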
long
xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid_tid,
    long eaddr,
    long edata)
{
    DECLARE_DOM0_OP;
    struct gdb_regs pt;
    long            retval = 0;
    unsigned long  *guest_va;
    cpumap_t        cpumap;
    int             cpu, index;
    void           *addr = (char *)eaddr;
    void           *data = (char *)edata;

    cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;

    switch ( request )
    {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(current_domid,
                                                           cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(xc_handle,
                                                      cpu, addr, PROT_READ);
        if ( guest_va == NULL )
            goto out_error;
        retval = *guest_va;
        break;

    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
        /* XXX assume that all CPUs have the same address space */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(current_domid,
                                                           cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(xc_handle,
                                                      cpu, addr, PROT_READ|PROT_WRITE);
        if ( guest_va == NULL )
            goto out_error;
        *guest_va = (unsigned long)data;
        break;
    case PTRACE_GETREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        SET_PT_REGS(pt, ctxt[cpu].user_regs);
        memcpy(data, &pt, sizeof(struct gdb_regs));
        break;

    case PTRACE_GETFPREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof (elf_fpregset_t));
        break;

    case PTRACE_GETFPXREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
        break;

    case PTRACE_SETREGS:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                         &ctxt[cpu])))
            goto out_error_dom0;
        break;

    case PTRACE_SINGLESTEP:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        /* XXX we can still have problems if the user switches threads
         * during single-stepping - but that seems ill-advised anyway.
         */
        ctxt[cpu].user_regs.eflags |= PSL_T;
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                         &ctxt[cpu])))
            goto out_error_dom0;
        /* FALLTHROUGH */
    case PTRACE_CONT:
    case PTRACE_DETACH:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        if ( request != PTRACE_SINGLESTEP )
        {
            FOREACH_CPU(cpumap, index) {
                cpu = index - 1;
                if (fetch_regs(xc_handle, cpu, NULL))
                    goto out_error;
                /* Clear trace flag */
                if ( ctxt[cpu].user_regs.eflags & PSL_T )
                {
                    ctxt[cpu].user_regs.eflags &= ~PSL_T;
                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
                                                     cpu, &ctxt[cpu])))
                        goto out_error_dom0;
                }
            }
        }
        if ( request == PTRACE_DETACH )
        {
            op.cmd = DOM0_SETDEBUGGING;
            op.u.setdebugging.domain = current_domid;
            op.u.setdebugging.enable = 0;
            if ((retval = do_dom0_op(xc_handle, &op)))
                goto out_error_dom0;
        }
        regs_valid = 0;
        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
                                        current_domid : -current_domid)))
            goto out_error_dom0;
        break;

    case PTRACE_ATTACH:
        current_domid = domid_tid;
        current_isfile = (int)edata;
        if (current_isfile)
            break;
        op.cmd = DOM0_GETDOMAININFO;
        op.u.getdomaininfo.domain = current_domid;
        retval = do_dom0_op(xc_handle, &op);
        if ( retval || (op.u.getdomaininfo.domain != current_domid) )
            goto out_error_dom0;
        if ( op.u.getdomaininfo.flags & DOMFLAGS_PAUSED )
            printf("domain currently paused\n");
        else if ((retval = xc_domain_pause(xc_handle, current_domid)))
            goto out_error_dom0;
        op.cmd = DOM0_SETDEBUGGING;
        op.u.setdebugging.domain = current_domid;
        op.u.setdebugging.enable = 1;
        if ((retval = do_dom0_op(xc_handle, &op)))
            goto out_error_dom0;

        if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
            printf("get_online_cpumap failed\n");
        if (online_cpumap != cpumap)
            online_vcpus_changed(cpumap);
        break;
    case PTRACE_SETFPREGS:
    case PTRACE_SETFPXREGS:
    case PTRACE_PEEKUSER:
    case PTRACE_POKEUSER:
    case PTRACE_SYSCALL:
    case PTRACE_KILL:
        goto out_unsupported; /* XXX not yet supported */

    case PTRACE_TRACEME:
        printf("PTRACE_TRACEME is an invalid request under Xen\n");
        goto out_error;
    }

    return retval;

 out_error_dom0:
    perror("dom0 op failed");
 out_error:
    errno = EINVAL;
    return retval;

 out_unsupported:
#ifdef DEBUG
    printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
#endif
    errno = ENOSYS;
    return -1;
}
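/*
 * Public wait entry point: dispatch to the core-file implementation when
 * attached to a core dump, otherwise poll the live domain.
 */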
int
xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options)
{
    if (current_isfile)
        return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
    return __xc_waitdomain(xc_handle, domain, status, options);
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */