ia64/xen-unstable

view tools/libxc/xc_ptrace.c @ 16544:d7a0a73e5dca

xc ptrace: Fix a couple of bugs in page-table walking.
Signed-off-by: John Zulauf <john.zulauf@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Dec 06 11:29:18 2007 +0000 (2007-12-06)
parents 2a7339251e4d
children d3a87899985d
line source
1 #include <sys/ptrace.h>
2 #include <sys/wait.h>
3 #include <time.h>
5 #include "xc_private.h"
6 #include "xg_private.h"
7 #include "xc_ptrace.h"
#ifdef DEBUG
/*
 * Human-readable names for ptrace request codes, indexed by the numeric
 * request value.  Used only for the debug message emitted on an
 * unsupported request (see out_unsupported in xc_ptrace).
 */
static char *ptrace_names[] = {
    "PTRACE_TRACEME",
    "PTRACE_PEEKTEXT",
    "PTRACE_PEEKDATA",
    "PTRACE_PEEKUSER",
    "PTRACE_POKETEXT",
    "PTRACE_POKEDATA",
    "PTRACE_POKEUSER",
    "PTRACE_CONT",
    "PTRACE_KILL",
    "PTRACE_SINGLESTEP",
    "PTRACE_INVALID",        /* gaps in the request numbering */
    "PTRACE_INVALID",
    "PTRACE_GETREGS",
    "PTRACE_SETREGS",
    "PTRACE_GETFPREGS",
    "PTRACE_SETFPREGS",
    "PTRACE_ATTACH",
    "PTRACE_DETACH",
    "PTRACE_GETFPXREGS",
    "PTRACE_SETFPXREGS",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_SYSCALL",
};
#endif
39 static int current_domid = -1;
40 static int current_isfile;
41 static int current_is_hvm;
43 static uint64_t online_cpumap;
44 static uint64_t regs_valid;
45 static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
47 extern int ffsll(long long int);
48 #define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
50 static int
51 fetch_regs(int xc_handle, int cpu, int *online)
52 {
53 xc_vcpuinfo_t info;
54 int retval = 0;
56 if (online)
57 *online = 0;
58 if ( !(regs_valid & (1 << cpu)) )
59 {
60 retval = xc_vcpu_getcontext(xc_handle, current_domid,
61 cpu, &ctxt[cpu]);
62 if ( retval )
63 goto done;
64 regs_valid |= (1 << cpu);
66 }
67 if ( online == NULL )
68 goto done;
70 retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
71 *online = info.online;
73 done:
74 return retval;
75 }
/* Callbacks registered by the debugger front-end for vcpu lifecycle events. */
static struct thr_ev_handlers {
    thr_ev_handler_t td_create; /* called when a vcpu comes online */
    thr_ev_handler_t td_death;  /* called when a vcpu goes offline */
} handlers;
82 void
83 xc_register_event_handler(thr_ev_handler_t h,
84 td_event_e e)
85 {
86 switch (e) {
87 case TD_CREATE:
88 handlers.td_create = h;
89 break;
90 case TD_DEATH:
91 handlers.td_death = h;
92 break;
93 default:
94 abort(); /* XXX */
95 }
96 }
98 static inline int
99 paging_enabled(vcpu_guest_context_t *v)
100 {
101 unsigned long cr0 = v->ctrlreg[0];
102 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
103 }
105 /*
106 * Fetch registers for all online cpus and set the cpumap
107 * to indicate which cpus are online
108 *
109 */
111 static int
112 get_online_cpumap(int xc_handle, struct xen_domctl_getdomaininfo *d,
113 uint64_t *cpumap)
114 {
115 int i, online, retval;
117 *cpumap = 0;
118 for (i = 0; i <= d->max_vcpu_id; i++) {
119 if ((retval = fetch_regs(xc_handle, i, &online)))
120 return retval;
121 if (online)
122 *cpumap |= (1 << i);
123 }
125 return 0;
126 }
128 /*
129 * Notify GDB of any vcpus that have come online or gone offline
130 * update online_cpumap
131 *
132 */
134 static void
135 online_vcpus_changed(uint64_t cpumap)
136 {
137 uint64_t changed_cpumap = cpumap ^ online_cpumap;
138 int index;
140 while ( (index = ffsll(changed_cpumap)) ) {
141 if ( cpumap & (1 << (index - 1)) )
142 {
143 if (handlers.td_create) handlers.td_create(index - 1);
144 } else {
145 IPRINTF("thread death: %d\n", index - 1);
146 if (handlers.td_death) handlers.td_death(index - 1);
147 }
148 changed_cpumap &= ~(1 << (index - 1));
149 }
150 online_cpumap = cpumap;
152 }
/* --------------------- */
/* XXX application state */
static long nr_pages = 0;           /* number of entries in page_array */
static uint64_t *page_array = NULL; /* domain's page frame list, from xc_get_pfn_list */
/*
 * Translate an address to a machine address for @cpu.  In this build it
 * is the identity mapping; the hook exists so auto-translated guests
 * could plug in a real translation.
 */
static uint64_t to_ma(int cpu, uint64_t maddr)
{
    (void)cpu; /* no per-vcpu translation needed here */
    return maddr;
}
/*
 * Map the guest page containing virtual address @guest_va into our
 * address space, walking a 2-level (non-PAE 32-bit) guest page table.
 * Returns a pointer adjusted to the in-page offset of @guest_va, or
 * NULL if any level is unmapped / not present.
 *
 * Each intermediate table is mapped read-only just long enough to read
 * one entry, then unmapped.  The final data page mapping is cached per
 * vcpu in v[]; the previous mapping for that vcpu is torn down first.
 * NOTE(review): the cached v[cpu] is never unmapped on detach — the last
 * mapping per vcpu persists for the process lifetime.
 */
static void *
map_domain_va_32(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l2e, l1e, l1p, p, va = (unsigned long)guest_va;
    uint32_t *l2, *l1;
    static void *v[MAX_VIRT_CPUS];   /* per-vcpu cached data-page mapping */

    /* Map the page directory; CR3 holds its frame (Xen-encoded). */
    l2 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_i386(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    /* Low 12 flag bits are discarded by the >> PAGE_SHIFT below. */
    l1p = to_ma(cpu, l2e);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_i386(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    p = to_ma(cpu, l1e);
    /* Replace any previously cached mapping for this vcpu. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    /* Return the address of @va's byte within the mapped page. */
    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
/*
 * Map the guest page containing virtual address @guest_va, walking a
 * 3-level PAE guest page table (64-bit entries).  Returns a pointer
 * adjusted to the in-page offset, or NULL if any level is not present
 * or cannot be mapped.  Same map-read-unmap pattern and per-vcpu data
 * page cache as map_domain_va_32.
 */
static void *
map_domain_va_pae(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    uint64_t l3e, l2e, l1e, l2p, l1p, p;
    unsigned long va = (unsigned long)guest_va;
    uint64_t *l3, *l2, *l1;
    static void *v[MAX_VIRT_CPUS];   /* per-vcpu cached data-page mapping */

    /* Map the PDPT (level 3); CR3 holds its frame (Xen-encoded). */
    l3 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset_pae(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    /* Flag bits in the entry are dropped by >> PAGE_SHIFT when mapping. */
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_pae(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    l1p = to_ma(cpu, l2e);
    /* NOTE(review): no 2M superpage (PSE) handling here, unlike the
     * 64-bit walker — assumes the guest maps this VA with 4K pages. */
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_pae(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    p = to_ma(cpu, l1e);
    /* Replace any previously cached mapping for this vcpu. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#ifdef __x86_64__
/*
 * Map the guest page containing virtual address @guest_va, walking a
 * 4-level long-mode guest page table.  Falls back to the 32-bit walker
 * when the vcpu is not in PAE/long mode.  Handles 2M superpages at
 * level 2.  Returns NULL if any level is not present or unmappable.
 */
static void *
map_domain_va_64(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l4e, l3e, l2e, l1e, l3p, l2p, l1p, p, va = (unsigned long)guest_va;
    uint64_t *l4, *l3, *l2, *l1;
    static void *v[MAX_VIRT_CPUS];   /* per-vcpu cached data-page mapping */

    /* CR4 bit 5 is PAE; clear means legacy 2-level paging. */
    if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
        return map_domain_va_32(xc_handle, cpu, guest_va, perm);

    l4 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l4 == NULL )
        return NULL;

    l4e = l4[l4_table_offset(va)];
    munmap(l4, PAGE_SIZE);
    if ( !(l4e & _PAGE_PRESENT) )
        return NULL;
    /* Entry flag bits are dropped by >> PAGE_SHIFT when mapping. */
    l3p = to_ma(cpu, l4e);
    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    l1p = to_ma(cpu, l2e);
    /* Bit 7 (PSE) in an L2 entry marks a 2M superpage. */
    if (l2e & 0x80) { /* 2M pages */
        /* Select the 4K frame within the superpage for this VA. */
        p = to_ma(cpu, l1p + (l1_table_offset(va) << PAGE_SHIFT));
    } else { /* 4K pages */
        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
        if ( l1 == NULL )
            return NULL;

        l1e = l1[l1_table_offset(va)];
        munmap(l1, PAGE_SIZE);
        if ( !(l1e & _PAGE_PRESENT) )
            return NULL;
        p = to_ma(cpu, l1e);
    }
    /* Replace any previously cached mapping for this vcpu. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#endif
322 static void *
323 map_domain_va(
324 int xc_handle,
325 int cpu,
326 void *guest_va,
327 int perm)
328 {
329 unsigned long va = (unsigned long) guest_va;
330 long npgs = xc_get_tot_pages(xc_handle, current_domid);
331 static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
333 if ( mode == MODE_UNKNOWN )
334 {
335 xen_capabilities_info_t caps;
336 (void)xc_version(xc_handle, XENVER_capabilities, caps);
337 if ( strstr(caps, "-x86_64") )
338 mode = MODE_64;
339 else if ( strstr(caps, "-x86_32p") )
340 mode = MODE_PAE;
341 else if ( strstr(caps, "-x86_32") )
342 mode = MODE_32;
343 }
345 if ( nr_pages != npgs )
346 {
347 if ( nr_pages > 0 )
348 free(page_array);
349 nr_pages = npgs;
350 if ( (page_array = malloc(nr_pages * sizeof(*page_array))) == NULL )
351 {
352 IPRINTF("Could not allocate memory\n");
353 return NULL;
354 }
355 if ( xc_get_pfn_list(xc_handle, current_domid,
356 page_array, nr_pages) != nr_pages )
357 {
358 IPRINTF("Could not get the page frame list\n");
359 return NULL;
360 }
361 }
363 if (fetch_regs(xc_handle, cpu, NULL))
364 return NULL;
366 if (!paging_enabled(&ctxt[cpu])) {
367 static void * v;
368 uint64_t page;
370 if ( v != NULL )
371 munmap(v, PAGE_SIZE);
373 page = to_ma(cpu, va);
375 v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
376 perm, page >> PAGE_SHIFT);
378 if ( v == NULL )
379 return NULL;
381 return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
382 }
383 #ifdef __x86_64__
384 if ( mode == MODE_64 )
385 return map_domain_va_64(xc_handle, cpu, guest_va, perm);
386 #endif
387 if ( mode == MODE_PAE )
388 return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
389 /* else ( mode == MODE_32 ) */
390 return map_domain_va_32(xc_handle, cpu, guest_va, perm);
391 }
/* Set by the debugger's SIGINT handler to request a pause of the domain. */
int control_c_pressed_flag = 0;

/*
 * Wait for the live domain to stop (become paused), polling domain info
 * every 10ms.  With WNOHANG the state is sampled once without blocking.
 * A pending Ctrl-C pauses the domain and returns immediately.  On exit,
 * the online-vcpu map is refreshed and GDB is notified of any changes.
 * Returns 0 on success, or the failing domctl's return value; the
 * domain's flags word is stored in *status.
 */
static int
__xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options)
{
    DECLARE_DOMCTL;
    int retval;
    struct timespec ts;
    uint64_t cpumap;

    /* Poll interval: 10ms. */
    ts.tv_sec = 0;
    ts.tv_nsec = 10*1000*1000;

    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = domain;

 retry:
    retval = do_domctl(xc_handle, &domctl);
    /* A mismatched domain id in the reply also counts as failure. */
    if ( retval || (domctl.domain != domain) )
    {
        IPRINTF("getdomaininfo failed\n");
        goto done;
    }
    *status = domctl.u.getdomaininfo.flags;

    if ( options & WNOHANG )
        goto done;

    if (control_c_pressed_flag) {
        /* User interrupt: force the domain to pause, consume the flag. */
        xc_domain_pause(xc_handle, domain);
        control_c_pressed_flag = 0;
        goto done;
    }

    if ( !(domctl.u.getdomaininfo.flags & XEN_DOMINF_paused) )
    {
        nanosleep(&ts,NULL);
        goto retry;
    }
 done:
    /* NOTE(review): reached even when the domctl failed, in which case
     * domctl.u.getdomaininfo may be stale/uninitialised — verify. */
    if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
        IPRINTF("get_online_cpumap failed\n");
    if (online_cpumap != cpumap)
        online_vcpus_changed(cpumap);

    return retval;
}
/*
 * ptrace-compatible entry point used by the GDB server to debug a Xen
 * domain (live, or a core file when current_isfile is set).
 *
 * @domid_tid: the vcpu number for most requests; the domain id for
 *             PTRACE_ATTACH.
 * @eaddr:     guest virtual address for PEEK/POKE requests.
 * @edata:     value to write (POKE), user buffer (GET*/SET* regs), or
 *             the "attach to core file" flag (ATTACH).
 *
 * Returns the peeked word for PEEK requests, 0/negative otherwise; on
 * error sets errno (EINVAL or ENOSYS) via the shared labels at the end.
 */
long
xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid_tid,
    long eaddr,
    long edata)
{
    DECLARE_DOMCTL;
    struct gdb_regs pt;
    long retval = 0;
    unsigned long *guest_va;
    uint64_t cpumap;
    int cpu, index;
    void *addr = (char *)eaddr;
    void *data = (char *)edata;

    /* For ATTACH, domid_tid is a domain id, not a vcpu number.
     * NOTE(review): cpu is used to index ctxt[MAX_VIRT_CPUS] below with
     * no range check — assumes the caller passes a valid vcpu id. */
    cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;

    switch ( request )
    {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:
        /* Map the guest page and return the word at addr. */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ);
        if ( guest_va == NULL )
            goto out_error;
        retval = *guest_va;
        break;

    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
        /* XXX assume that all CPUs have the same address space */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
        if ( guest_va == NULL )
            goto out_error;
        *guest_va = (unsigned long)data;
        break;

    case PTRACE_GETREGS:
        /* Core files already have ctxt[] populated; live domains refresh. */
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        SET_PT_REGS(pt, ctxt[cpu].user_regs);
        memcpy(data, &pt, sizeof(struct gdb_regs));
        break;

    case PTRACE_GETFPREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof (elf_fpregset_t));
        break;

    case PTRACE_GETFPXREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
        break;

    case PTRACE_SETREGS:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                &ctxt[cpu])))
            goto out_error_domctl;
        break;

    case PTRACE_SINGLESTEP:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        /* XXX we can still have problems if the user switches threads
         * during single-stepping - but that just seems retarded
         */
        /* Set the trap flag so the vcpu faults after one instruction. */
        ctxt[cpu].user_regs.eflags |= PSL_T;
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                &ctxt[cpu])))
            goto out_error_domctl;
        /* FALLTHROUGH */

    case PTRACE_CONT:
    case PTRACE_DETACH:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        if ( request != PTRACE_SINGLESTEP )
        {
            /* Full continue/detach: clear any stale trap flags on every
             * online vcpu before letting the domain run. */
            FOREACH_CPU(cpumap, index) {
                cpu = index - 1;
                if (fetch_regs(xc_handle, cpu, NULL))
                    goto out_error;
                /* Clear trace flag */
                if ( ctxt[cpu].user_regs.eflags & PSL_T )
                {
                    ctxt[cpu].user_regs.eflags &= ~PSL_T;
                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
                                                cpu, &ctxt[cpu])))
                        goto out_error_domctl;
                }
            }
        }
        if ( request == PTRACE_DETACH )
        {
            if ((retval = xc_domain_setdebugging(xc_handle, current_domid, 0)))
                goto out_error_domctl;
        }
        /* Cached register state is stale once the domain runs again. */
        regs_valid = 0;
        /* NOTE(review): the sign normalisation of current_domid here
         * suggests a negative-domid convention elsewhere — confirm. */
        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
                                current_domid : -current_domid)))
            goto out_error_domctl;
        break;

    case PTRACE_ATTACH:
        current_domid = domid_tid;
        current_isfile = (int)edata;   /* non-zero: attach to a core file */
        if (current_isfile)
            break;
        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.domain = current_domid;
        retval = do_domctl(xc_handle, &domctl);
        if ( retval || (domctl.domain != current_domid) )
            goto out_error_domctl;
        if ( domctl.u.getdomaininfo.flags & XEN_DOMINF_paused )
            IPRINTF("domain currently paused\n");
        else if ((retval = xc_domain_pause(xc_handle, current_domid)))
            goto out_error_domctl;
        current_is_hvm = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
        if ((retval = xc_domain_setdebugging(xc_handle, current_domid, 1)))
            goto out_error_domctl;

        /* Snapshot which vcpus are online and tell GDB about them. */
        if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
            IPRINTF("get_online_cpumap failed\n");
        if (online_cpumap != cpumap)
            online_vcpus_changed(cpumap);
        break;

    case PTRACE_TRACEME:
        IPRINTF("PTRACE_TRACEME is an invalid request under Xen\n");
        goto out_error;

    default:
        goto out_unsupported; /* XXX not yet supported */
    }

    return retval;

 out_error_domctl:
    perror("domctl failed");
 out_error:
    errno = EINVAL;
    return retval;

 out_unsupported:
#ifdef DEBUG
    IPRINTF("unsupported xc_ptrace request %s\n", ptrace_names[request]);
#endif
    errno = ENOSYS;
    return -1;
}
614 int
615 xc_waitdomain(
616 int xc_handle,
617 int domain,
618 int *status,
619 int options)
620 {
621 if (current_isfile)
622 return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
623 return __xc_waitdomain(xc_handle, domain, status, options);
624 }
626 /*
627 * Local variables:
628 * mode: C
629 * c-set-style: "BSD"
630 * c-basic-offset: 4
631 * tab-width: 4
632 * indent-tabs-mode: nil
633 * End:
634 */