ia64/xen-unstable

view tools/libxc/xc_ptrace.c @ 18573:8dc05a2b3beb

xc_ptrace: Allow gdbserver to connect to a guest before APs are
brought online.

Signed-off-by: Kip Macy <kmacy@freebsd.org>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 02 11:32:08 2008 +0100 (2008-10-02)
parents ec5717ac4815
children 5b73fa1b9562
line source
1 #include <sys/ptrace.h>
2 #include <sys/wait.h>
3 #include <time.h>
5 #include "xc_private.h"
6 #include "xg_private.h"
7 #include "xc_ptrace.h"
#ifdef DEBUG
/*
 * Human-readable names for ptrace request codes, indexed directly by the
 * numeric request value; used only by the diagnostic message on the
 * out_unsupported path of xc_ptrace().  "PTRACE_INVALID" fills the gaps
 * in the request number space so the index stays aligned.
 */
static char *ptrace_names[] = {
    "PTRACE_TRACEME",
    "PTRACE_PEEKTEXT",
    "PTRACE_PEEKDATA",
    "PTRACE_PEEKUSER",
    "PTRACE_POKETEXT",
    "PTRACE_POKEDATA",
    "PTRACE_POKEUSER",
    "PTRACE_CONT",
    "PTRACE_KILL",
    "PTRACE_SINGLESTEP",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_GETREGS",
    "PTRACE_SETREGS",
    "PTRACE_GETFPREGS",
    "PTRACE_SETFPREGS",
    "PTRACE_ATTACH",
    "PTRACE_DETACH",
    "PTRACE_GETFPXREGS",
    "PTRACE_SETFPXREGS",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_SYSCALL",
};
#endif
39 static int current_domid = -1;
40 static int current_isfile;
41 static int current_is_hvm;
43 static uint64_t online_cpumap;
44 static uint64_t regs_valid;
45 static vcpu_guest_context_any_t ctxt[MAX_VIRT_CPUS];
47 extern int ffsll(long long int);
48 #define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
50 static int
51 fetch_regs(int xc_handle, int cpu, int *online)
52 {
53 xc_vcpuinfo_t info;
54 int retval = 0;
56 if (online)
57 *online = 0;
58 if ( !(regs_valid & (1 << cpu)) )
59 {
60 retval = xc_vcpu_getcontext(xc_handle, current_domid,
61 cpu, &ctxt[cpu]);
62 if ( retval )
63 goto done;
64 regs_valid |= (1 << cpu);
66 }
67 if ( online == NULL )
68 goto done;
70 retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
71 *online = info.online;
73 done:
74 return retval;
75 }
77 static struct thr_ev_handlers {
78 thr_ev_handler_t td_create;
79 thr_ev_handler_t td_death;
80 } handlers;
82 void
83 xc_register_event_handler(thr_ev_handler_t h,
84 td_event_e e)
85 {
86 switch (e) {
87 case TD_CREATE:
88 handlers.td_create = h;
89 break;
90 case TD_DEATH:
91 handlers.td_death = h;
92 break;
93 default:
94 abort(); /* XXX */
95 }
96 }
98 static inline int
99 paging_enabled(vcpu_guest_context_any_t *v)
100 {
101 unsigned long cr0 = v->c.ctrlreg[0];
102 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
103 }
105 /*
106 * Fetch registers for all online cpus and set the cpumap
107 * to indicate which cpus are online
108 *
109 */
111 static int
112 get_online_cpumap(int xc_handle, struct xen_domctl_getdomaininfo *d,
113 uint64_t *cpumap)
114 {
115 int i, online;
117 *cpumap = 0;
118 for (i = 0; i <= d->max_vcpu_id; i++) {
119 fetch_regs(xc_handle, i, &online);
120 if (online)
121 *cpumap |= (1 << i);
122 }
124 return (*cpumap == 0) ? -1 : 0;
125 }
127 /*
128 * Notify GDB of any vcpus that have come online or gone offline
129 * update online_cpumap
130 *
131 */
133 static void
134 online_vcpus_changed(uint64_t cpumap)
135 {
136 uint64_t changed_cpumap = cpumap ^ online_cpumap;
137 int index;
139 while ( (index = ffsll(changed_cpumap)) ) {
140 if ( cpumap & (1 << (index - 1)) )
141 {
142 if (handlers.td_create) handlers.td_create(index - 1);
143 } else {
144 IPRINTF("thread death: %d\n", index - 1);
145 if (handlers.td_death) handlers.td_death(index - 1);
146 }
147 changed_cpumap &= ~(1 << (index - 1));
148 }
149 online_cpumap = cpumap;
151 }
153 /* --------------------- */
154 /* XXX application state */
155 static long nr_pages = 0;
156 static uint64_t *page_array = NULL;
158 static uint64_t to_ma(int cpu, uint64_t maddr)
159 {
160 return maddr;
161 }
/*
 * Map one page of guest virtual address space for a 32-bit (2-level,
 * non-PAE) guest.  Walks the page tables named by the vcpu's CR3 and
 * maps the final frame with the requested permissions.  Returns a
 * pointer into the mapped page at the right offset, or NULL if any
 * level of the walk is not present or a mapping fails.
 */
static void *
map_domain_va_32(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l2e, l1e, l1p, p, va = (unsigned long)guest_va;
    uint32_t *l2, *l1;
    /* One cached data mapping per vcpu; replaced on the next call. */
    static void *v[MAX_VIRT_CPUS];

    /* Map the page directory (L2) from CR3. */
    l2 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].c.ctrlreg[3]));
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_i386(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    /* Map the page table (L1) named by the directory entry. */
    l1p = to_ma(cpu, l2e);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_i386(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    /* Map the final data frame, dropping any previous cached mapping. */
    p = to_ma(cpu, l1e);
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
/*
 * Map one page of guest virtual address space for a PAE (3-level)
 * guest.  Same contract as map_domain_va_32() but with an extra L3
 * walk step and 64-bit page table entries.  Returns NULL if any level
 * is not present or a mapping fails.
 */
static void *
map_domain_va_pae(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    uint64_t l3e, l2e, l1e, l2p, l1p, p;
    unsigned long va = (unsigned long)guest_va;
    uint64_t *l3, *l2, *l1;
    /* One cached data mapping per vcpu; replaced on the next call. */
    static void *v[MAX_VIRT_CPUS];

    /* Map the page directory pointer table (L3) from CR3. */
    l3 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].c.ctrlreg[3]));
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset_pae(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    /* Map the page directory (L2). */
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_pae(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    /* Map the page table (L1). */
    l1p = to_ma(cpu, l2e);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_pae(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    /* Map the final data frame, dropping any previous cached mapping. */
    p = to_ma(cpu, l1e);
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#ifdef __x86_64__
/*
 * Map one page of guest virtual address space for a 64-bit (4-level)
 * guest.  Falls back to the 32-bit walker when the vcpu is running in
 * legacy ia32 mode (CR4.PAE clear).  Handles both 4K pages and 2M
 * superpages at the L2 level.  Returns NULL if any level is not
 * present or a mapping fails.
 */
static void *
map_domain_va_64(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l4e, l3e, l2e, l1e, l3p, l2p, l1p, p, va = (unsigned long)guest_va;
    uint64_t *l4, *l3, *l2, *l1;
    /* One cached data mapping per vcpu; replaced on the next call. */
    static void *v[MAX_VIRT_CPUS];

    /* Bit 5 of CR4 is PAE; clear means the vcpu is in legacy ia32 mode. */
    if ((ctxt[cpu].c.ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
        return map_domain_va_32(xc_handle, cpu, guest_va, perm);

    /* Map the PML4 (L4) from CR3. */
    l4 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].c.ctrlreg[3]));
    if ( l4 == NULL )
        return NULL;

    l4e = l4[l4_table_offset(va)];
    munmap(l4, PAGE_SIZE);
    if ( !(l4e & _PAGE_PRESENT) )
        return NULL;
    /* Map the page directory pointer table (L3). */
    l3p = to_ma(cpu, l4e);
    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    /* Map the page directory (L2). */
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    l1p = to_ma(cpu, l2e);
    if (l2e & 0x80) { /* 2M pages */
        /* PSE bit set: the L2 entry maps a 2M superpage directly; pick
         * the 4K frame within it using the would-be L1 index. */
        p = to_ma(cpu, l1p + (l1_table_offset(va) << PAGE_SHIFT));
    } else { /* 4K pages */
        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
        if ( l1 == NULL )
            return NULL;

        l1e = l1[l1_table_offset(va)];
        munmap(l1, PAGE_SIZE);
        if ( !(l1e & _PAGE_PRESENT) )
            return NULL;
        p = to_ma(cpu, l1e);
    }
    /* Map the final data frame, dropping any previous cached mapping. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#endif
321 static void *
322 map_domain_va(
323 int xc_handle,
324 int cpu,
325 void *guest_va,
326 int perm)
327 {
328 unsigned long va = (unsigned long) guest_va;
329 long npgs = xc_get_tot_pages(xc_handle, current_domid);
330 static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
332 if ( mode == MODE_UNKNOWN )
333 {
334 xen_capabilities_info_t caps;
335 (void)xc_version(xc_handle, XENVER_capabilities, caps);
336 if ( strstr(caps, "-x86_64") )
337 mode = MODE_64;
338 else if ( strstr(caps, "-x86_32p") )
339 mode = MODE_PAE;
340 else if ( strstr(caps, "-x86_32") )
341 mode = MODE_32;
342 }
344 if ( nr_pages != npgs )
345 {
346 if ( nr_pages > 0 )
347 free(page_array);
348 nr_pages = npgs;
349 if ( (page_array = malloc(nr_pages * sizeof(*page_array))) == NULL )
350 {
351 IPRINTF("Could not allocate memory\n");
352 return NULL;
353 }
354 if ( xc_get_pfn_list(xc_handle, current_domid,
355 page_array, nr_pages) != nr_pages )
356 {
357 IPRINTF("Could not get the page frame list\n");
358 return NULL;
359 }
360 }
362 if (fetch_regs(xc_handle, cpu, NULL))
363 return NULL;
365 if (!paging_enabled(&ctxt[cpu])) {
366 static void * v;
367 uint64_t page;
369 if ( v != NULL )
370 munmap(v, PAGE_SIZE);
372 page = to_ma(cpu, va);
374 v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
375 perm, page >> PAGE_SHIFT);
377 if ( v == NULL )
378 return NULL;
380 return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
381 }
382 #ifdef __x86_64__
383 if ( mode == MODE_64 )
384 return map_domain_va_64(xc_handle, cpu, guest_va, perm);
385 #endif
386 if ( mode == MODE_PAE )
387 return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
388 /* else ( mode == MODE_32 ) */
389 return map_domain_va_32(xc_handle, cpu, guest_va, perm);
390 }
/* Set asynchronously (e.g. from gdbserver's SIGINT handler) to request
 * that the domain be paused on the next wait iteration. */
int control_c_pressed_flag = 0;

/*
 * Wait for a live (non-corefile) domain to become paused, polling the
 * hypervisor every 10ms.  Writes the domain's flags to *status.  With
 * WNOHANG the state is sampled once without waiting.  A pending
 * control-C pauses the domain and returns immediately.  On exit the
 * online cpumap is refreshed and gdb is notified of vcpu changes.
 * Returns 0 on success, non-zero on domctl failure.
 */
static int
__xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options)
{
    DECLARE_DOMCTL;
    int retval;
    struct timespec ts;
    uint64_t cpumap;

    /* Poll interval: 10ms. */
    ts.tv_sec = 0;
    ts.tv_nsec = 10*1000*1000;

    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = domain;

 retry:
    retval = do_domctl(xc_handle, &domctl);
    /* domctl.domain != domain means the domain no longer exists. */
    if ( retval || (domctl.domain != domain) )
    {
        IPRINTF("getdomaininfo failed\n");
        goto done;
    }
    *status = domctl.u.getdomaininfo.flags;

    if ( options & WNOHANG )
        goto done;

    /* User interrupt: pause the domain so gdb regains control. */
    if (control_c_pressed_flag) {
        xc_domain_pause(xc_handle, domain);
        control_c_pressed_flag = 0;
        goto done;
    }

    if ( !(domctl.u.getdomaininfo.flags & XEN_DOMINF_paused) )
    {
        nanosleep(&ts,NULL);
        goto retry;
    }
 done:
    /* Refresh the vcpu online map and tell gdb about any changes. */
    if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
        IPRINTF("get_online_cpumap failed\n");
    if (online_cpumap != cpumap)
        online_vcpus_changed(cpumap);
    return retval;
}
/*
 * ptrace()-compatible entry point used by gdbserver to debug a Xen
 * domain.  domid_tid is the target vcpu for most requests, but the
 * domain id for PTRACE_ATTACH.  eaddr/edata mirror ptrace's addr/data
 * arguments.  Returns the peeked word for PEEK requests, 0 on success
 * otherwise; on failure sets errno (EINVAL or ENOSYS) and returns the
 * error value.  Corefile (current_isfile) sessions support only the
 * read-oriented subset of requests.
 */
long
xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid_tid,
    long eaddr,
    long edata)
{
    DECLARE_DOMCTL;
    struct gdb_regs pt;
    long retval = 0;
    unsigned long *guest_va;
    uint64_t cpumap;
    int cpu, index;
    void *addr = (char *)eaddr;
    void *data = (char *)edata;

    /* For ATTACH, domid_tid is a domain id, not a vcpu number. */
    cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;

    switch ( request )
    {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:
        /* Read one word from the guest's address space. */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ);
        if ( guest_va == NULL )
            goto out_error;
        retval = *guest_va;
        break;

    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
        /* XXX assume that all CPUs have the same address space */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
        if ( guest_va == NULL )
            goto out_error;
        *guest_va = (unsigned long)data;
        break;

    case PTRACE_GETREGS:
        /* Copy the vcpu's general registers out in gdb layout. */
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        SET_PT_REGS(pt, ctxt[cpu].c.user_regs);
        memcpy(data, &pt, sizeof(struct gdb_regs));
        break;

    case PTRACE_GETFPREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].c.fpu_ctxt, sizeof (elf_fpregset_t));
        break;

    case PTRACE_GETFPXREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].c.fpu_ctxt, sizeof(ctxt[cpu].c.fpu_ctxt));
        break;

    case PTRACE_SETREGS:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].c.user_regs);
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                         &ctxt[cpu])))
            goto out_error_domctl;
        break;

    case PTRACE_SINGLESTEP:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        /* XXX we can still have problems if the user switches threads
         * during single-stepping - but that just seems retarded
         */
        /* Set the trap flag so the vcpu faults after one instruction. */
        ctxt[cpu].c.user_regs.eflags |= PSL_T;
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                         &ctxt[cpu])))
            goto out_error_domctl;
        /* FALLTHROUGH */

    case PTRACE_CONT:
    case PTRACE_DETACH:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        if ( request != PTRACE_SINGLESTEP )
        {
            /* Clear any leftover trap flags on all online vcpus before
             * letting the domain run freely. */
            FOREACH_CPU(cpumap, index) {
                cpu = index - 1;
                if (fetch_regs(xc_handle, cpu, NULL))
                    goto out_error;
                /* Clear trace flag */
                if ( ctxt[cpu].c.user_regs.eflags & PSL_T )
                {
                    ctxt[cpu].c.user_regs.eflags &= ~PSL_T;
                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
                                                     cpu, &ctxt[cpu])))
                        goto out_error_domctl;
                }
            }
        }
        if ( request == PTRACE_DETACH )
        {
            if ((retval = xc_domain_setdebugging(xc_handle, current_domid, 0)))
                goto out_error_domctl;
        }
        /* Cached contexts go stale once the domain runs again. */
        regs_valid = 0;
        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
                                        current_domid : -current_domid)))
            goto out_error_domctl;
        break;

    case PTRACE_ATTACH:
        /* edata non-zero selects corefile debugging; otherwise pause the
         * live domain and enable its debugging flag. */
        current_domid = domid_tid;
        current_isfile = (int)edata;
        if (current_isfile)
            break;
        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.domain = current_domid;
        retval = do_domctl(xc_handle, &domctl);
        if ( retval || (domctl.domain != current_domid) )
            goto out_error_domctl;
        if ( domctl.u.getdomaininfo.flags & XEN_DOMINF_paused )
            IPRINTF("domain currently paused\n");
        else if ((retval = xc_domain_pause(xc_handle, current_domid)))
            goto out_error_domctl;
        current_is_hvm = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
        if ((retval = xc_domain_setdebugging(xc_handle, current_domid, 1)))
            goto out_error_domctl;

        /* Tell gdb which vcpus are online at attach time. */
        if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
            IPRINTF("get_online_cpumap failed\n");
        if (online_cpumap != cpumap)
            online_vcpus_changed(cpumap);
        break;

    case PTRACE_TRACEME:
        IPRINTF("PTRACE_TRACEME is an invalid request under Xen\n");
        goto out_error;

    default:
        goto out_unsupported; /* XXX not yet supported */
    }

    return retval;

 out_error_domctl:
    perror("domctl failed");
 out_error:
    errno = EINVAL;
    return retval;

 out_unsupported:
#ifdef DEBUG
    IPRINTF("unsupported xc_ptrace request %s\n", ptrace_names[request]);
#endif
    errno = ENOSYS;
    return -1;

}
613 int
614 xc_waitdomain(
615 int xc_handle,
616 int domain,
617 int *status,
618 int options)
619 {
620 if (current_isfile)
621 return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
622 return __xc_waitdomain(xc_handle, domain, status, options);
623 }
625 /*
626 * Local variables:
627 * mode: C
628 * c-set-style: "BSD"
629 * c-basic-offset: 4
630 * tab-width: 4
631 * indent-tabs-mode: nil
632 * End:
633 */