ia64/xen-unstable

view tools/libxc/xc_ptrace.c @ 9488:0a6f5527ca4b

[IA64] set itv handoff as masked and enable reading irr[0-3]

Set initial vcpu itv handoff state to mask the timer vector.
This seems to match hardware and makes logical sense from a
spurious interrupt perspective. Enable vcpu_get_irr[0-3]
functions as they seem to work and have the proper backing.
This enables the check_sal_cache_flush() in arch/ia64/kernel/sal.c
to work unmodified, allowing us to remove the Xen changes from
the file (and thus the file from the sparse tree).

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Tue Apr 04 09:39:45 2006 -0600 (2006-04-04)
parents 30ae67d6e5f0
children f0e14b4e535c
line source
1 #define XC_PTRACE_PRIVATE
3 #include <sys/ptrace.h>
4 #include <sys/wait.h>
5 #include <time.h>
7 #include "xc_private.h"
8 #include "xg_private.h"
9 #include "xc_ptrace.h"
11 #ifdef DEBUG
12 static char *ptrace_names[] = {
13 "PTRACE_TRACEME",
14 "PTRACE_PEEKTEXT",
15 "PTRACE_PEEKDATA",
16 "PTRACE_PEEKUSER",
17 "PTRACE_POKETEXT",
18 "PTRACE_POKEDATA",
19 "PTRACE_POKEUSER",
20 "PTRACE_CONT",
21 "PTRACE_KILL",
22 "PTRACE_SINGLESTEP",
23 "PTRACE_INVALID",
24 "PTRACE_INVALID",
25 "PTRACE_GETREGS",
26 "PTRACE_SETREGS",
27 "PTRACE_GETFPREGS",
28 "PTRACE_SETFPREGS",
29 "PTRACE_ATTACH",
30 "PTRACE_DETACH",
31 "PTRACE_GETFPXREGS",
32 "PTRACE_SETFPXREGS",
33 "PTRACE_INVALID",
34 "PTRACE_INVALID",
35 "PTRACE_INVALID",
36 "PTRACE_INVALID",
37 "PTRACE_SYSCALL",
38 };
39 #endif
41 static int current_domid = -1;
42 static int current_isfile;
44 static cpumap_t online_cpumap;
45 static cpumap_t regs_valid;
46 static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
48 extern int ffsll(long long int);
49 #define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
52 static int
53 fetch_regs(int xc_handle, int cpu, int *online)
54 {
55 xc_vcpuinfo_t info;
56 int retval = 0;
58 if (online)
59 *online = 0;
60 if ( !(regs_valid & (1 << cpu)) )
61 {
62 retval = xc_vcpu_getcontext(xc_handle, current_domid,
63 cpu, &ctxt[cpu]);
64 if ( retval )
65 goto done;
66 regs_valid |= (1 << cpu);
68 }
69 if ( online == NULL )
70 goto done;
72 retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
73 *online = info.online;
75 done:
76 return retval;
77 }
79 static struct thr_ev_handlers {
80 thr_ev_handler_t td_create;
81 thr_ev_handler_t td_death;
82 } handlers;
84 void
85 xc_register_event_handler(thr_ev_handler_t h,
86 td_event_e e)
87 {
88 switch (e) {
89 case TD_CREATE:
90 handlers.td_create = h;
91 break;
92 case TD_DEATH:
93 handlers.td_death = h;
94 break;
95 default:
96 abort(); /* XXX */
97 }
98 }
100 static inline int
101 paging_enabled(vcpu_guest_context_t *v)
102 {
103 unsigned long cr0 = v->ctrlreg[0];
104 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
105 }
107 /*
108 * Fetch registers for all online cpus and set the cpumap
109 * to indicate which cpus are online
110 *
111 */
113 static int
114 get_online_cpumap(int xc_handle, dom0_getdomaininfo_t *d, cpumap_t *cpumap)
115 {
116 int i, online, retval;
118 *cpumap = 0;
119 for (i = 0; i <= d->max_vcpu_id; i++) {
120 if ((retval = fetch_regs(xc_handle, i, &online)))
121 return retval;
122 if (online)
123 *cpumap |= (1 << i);
124 }
126 return 0;
127 }
129 /*
130 * Notify GDB of any vcpus that have come online or gone offline
131 * update online_cpumap
132 *
133 */
135 static void
136 online_vcpus_changed(cpumap_t cpumap)
137 {
138 cpumap_t changed_cpumap = cpumap ^ online_cpumap;
139 int index;
141 while ( (index = ffsll(changed_cpumap)) ) {
142 if ( cpumap & (1 << (index - 1)) )
143 {
144 if (handlers.td_create) handlers.td_create(index - 1);
145 } else {
146 printf("thread death: %d\n", index - 1);
147 if (handlers.td_death) handlers.td_death(index - 1);
148 }
149 changed_cpumap &= ~(1 << (index - 1));
150 }
151 online_cpumap = cpumap;
153 }
155 /* --------------------- */
157 static void *
158 map_domain_va_pae(
159 int xc_handle,
160 int cpu,
161 void *guest_va,
162 int perm)
163 {
164 unsigned long l2p, l1p, p, va = (unsigned long)guest_va;
165 uint64_t *l3, *l2, *l1;
166 static void *v;
168 if (fetch_regs(xc_handle, cpu, NULL))
169 return NULL;
171 l3 = xc_map_foreign_range(
172 xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
173 if ( l3 == NULL )
174 return NULL;
176 l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
177 l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
178 if ( l2 == NULL )
179 return NULL;
181 l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
182 l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
183 if ( l1 == NULL )
184 return NULL;
186 p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
187 if ( v != NULL )
188 munmap(v, PAGE_SIZE);
189 v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
190 if ( v == NULL )
191 return NULL;
193 return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
194 }
196 #ifdef __x86_64__
197 static void *
198 map_domain_va(
199 int xc_handle,
200 int cpu,
201 void *guest_va,
202 int perm)
203 {
204 unsigned long l3p, l2p, l1p, p, va = (unsigned long)guest_va;
205 uint64_t *l4, *l3, *l2, *l1;
206 static void *v;
208 if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
209 return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
211 if (fetch_regs(xc_handle, cpu, NULL))
212 return NULL;
214 l4 = xc_map_foreign_range(
215 xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
216 if ( l4 == NULL )
217 return NULL;
219 l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
220 l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
221 if ( l3 == NULL )
222 return NULL;
224 l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
225 l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
226 if ( l2 == NULL )
227 return NULL;
229 l1p = l2[l2_table_offset(va)] >> PAGE_SHIFT;
230 l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
231 if ( l1 == NULL )
232 return NULL;
234 p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
235 if ( v != NULL )
236 munmap(v, PAGE_SIZE);
237 v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
238 if ( v == NULL )
239 return NULL;
241 return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
242 }
243 #endif
#ifdef __i386__
/* XXX application state */
static long nr_pages = 0;                 /* size of page_array, in frames */
static unsigned long *page_array = NULL;  /* guest pfn -> mfn list */

/*
 * Translate guest virtual address 'guest_va' for a 32-bit guest.
 * Detects the hypervisor's mode once (via XENVER_capabilities) and
 * defers to the PAE walker when appropriate; otherwise hand-walks the
 * 2-level page tables, caching the most recently mapped CR3/PDE/leaf
 * pages per vcpu.  Returns a local pointer into the leaf page, or NULL
 * on failure.
 */
static void *
map_domain_va(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;
    long npgs = xc_get_tot_pages(xc_handle, current_domid);

    /* Per-vcpu caches of the most recently mapped table/leaf pages. */
    static uint32_t cr3_phys[MAX_VIRT_CPUS];
    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
    static unsigned long pde_phys[MAX_VIRT_CPUS];
    static unsigned long *pde_virt[MAX_VIRT_CPUS];
    static unsigned long page_phys[MAX_VIRT_CPUS];
    static unsigned long *page_virt[MAX_VIRT_CPUS];
    static int prev_perm[MAX_VIRT_CPUS];
    static enum { MODE_UNKNOWN, MODE_32, MODE_PAE, MODE_64 } mode;

    if ( mode == MODE_UNKNOWN )
    {
        xen_capabilities_info_t caps;
        (void)xc_version(xc_handle, XENVER_capabilities, caps);
        if ( strstr(caps, "-x86_64") )
            mode = MODE_64;
        else if ( strstr(caps, "-x86_32p") )
            mode = MODE_PAE;
        else if ( strstr(caps, "-x86_32") )
            mode = MODE_32;
    }

    if ( mode == MODE_PAE )
        return map_domain_va_pae(xc_handle, cpu, guest_va, perm);

    /* (Re)build the pfn list whenever the domain's page count changes. */
    if ( nr_pages != npgs )
    {
        if ( nr_pages > 0 )
            free(page_array);
        nr_pages = npgs;
        if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
        {
            printf("Could not allocate memory\n");
            /* Reset so the next call retries instead of using a NULL array. */
            nr_pages = 0;
            return NULL;
        }
        if ( xc_get_pfn_list(xc_handle, current_domid,
                             page_array, nr_pages) != nr_pages )
        {
            printf("Could not get the page frame list\n");
            /* Discard the half-initialized array; retry on the next call. */
            free(page_array);
            page_array = NULL;
            nr_pages = 0;
            return NULL;
        }
    }

    if (fetch_regs(xc_handle, cpu, NULL))
        return NULL;

    if (paging_enabled(&ctxt[cpu])) {
        /* Remap the page directory only when CR3 changed. */
        if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
        {
            cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
            if ( cr3_virt[cpu] )
                munmap(cr3_virt[cpu], PAGE_SIZE);
            cr3_virt[cpu] = xc_map_foreign_range(
                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
                cr3_phys[cpu] >> PAGE_SHIFT);
            if ( cr3_virt[cpu] == NULL )
                return NULL;
        }
        if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
            return NULL;
        /* HVM guests store pfns in their tables; convert to an mfn. */
        if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
            pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
        if ( pde != pde_phys[cpu] )
        {
            pde_phys[cpu] = pde;
            if ( pde_virt[cpu] )
                munmap(pde_virt[cpu], PAGE_SIZE);
            pde_virt[cpu] = xc_map_foreign_range(
                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
                pde_phys[cpu] >> PAGE_SHIFT);
            if ( pde_virt[cpu] == NULL )
                return NULL;
        }
        if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
            return NULL;
    } else {
        /* No paging: the "virtual" address is already physical. */
        page = va;
    }
    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
    /* Remap the leaf page if the frame or requested protection changed. */
    if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
    {
        page_phys[cpu] = page;
        if ( page_virt[cpu] )
            munmap(page_virt[cpu], PAGE_SIZE);
        page_virt[cpu] = xc_map_foreign_range(
            xc_handle, current_domid, PAGE_SIZE, perm,
            page_phys[cpu] >> PAGE_SHIFT);
        if ( page_virt[cpu] == NULL )
        {
            page_phys[cpu] = 0;
            return NULL;
        }
        prev_perm[cpu] = perm;
    }

    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}

#endif
363 static int
364 __xc_waitdomain(
365 int xc_handle,
366 int domain,
367 int *status,
368 int options)
369 {
370 DECLARE_DOM0_OP;
371 int retval;
372 struct timespec ts;
373 cpumap_t cpumap;
375 ts.tv_sec = 0;
376 ts.tv_nsec = 10*1000*1000;
378 op.cmd = DOM0_GETDOMAININFO;
379 op.u.getdomaininfo.domain = domain;
382 retry:
383 retval = do_dom0_op(xc_handle, &op);
384 if ( retval || (op.u.getdomaininfo.domain != domain) )
385 {
386 printf("getdomaininfo failed\n");
387 goto done;
388 }
389 *status = op.u.getdomaininfo.flags;
391 if ( options & WNOHANG )
392 goto done;
394 if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) )
395 {
396 nanosleep(&ts,NULL);
397 goto retry;
398 }
399 /* XXX check for ^C here */
400 done:
401 if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
402 printf("get_online_cpumap failed\n");
403 if (online_cpumap != cpumap)
404 online_vcpus_changed(cpumap);
405 return retval;
407 }
410 long
411 xc_ptrace(
412 int xc_handle,
413 enum __ptrace_request request,
414 uint32_t domid_tid,
415 long eaddr,
416 long edata)
417 {
418 DECLARE_DOM0_OP;
419 struct gdb_regs pt;
420 long retval = 0;
421 unsigned long *guest_va;
422 cpumap_t cpumap;
423 int cpu, index;
424 void *addr = (char *)eaddr;
425 void *data = (char *)edata;
427 cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;
429 switch ( request )
430 {
431 case PTRACE_PEEKTEXT:
432 case PTRACE_PEEKDATA:
433 if (current_isfile)
434 guest_va = (unsigned long *)map_domain_va_core(current_domid,
435 cpu, addr, ctxt);
436 else
437 guest_va = (unsigned long *)map_domain_va(xc_handle,
438 cpu, addr, PROT_READ);
439 if ( guest_va == NULL )
440 goto out_error;
441 retval = *guest_va;
442 break;
444 case PTRACE_POKETEXT:
445 case PTRACE_POKEDATA:
446 /* XXX assume that all CPUs have the same address space */
447 if (current_isfile)
448 guest_va = (unsigned long *)map_domain_va_core(current_domid,
449 cpu, addr, ctxt);
450 else
451 guest_va = (unsigned long *)map_domain_va(xc_handle,
452 cpu, addr, PROT_READ|PROT_WRITE);
453 if ( guest_va == NULL )
454 goto out_error;
455 *guest_va = (unsigned long)data;
456 break;
458 case PTRACE_GETREGS:
459 if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
460 goto out_error;
461 SET_PT_REGS(pt, ctxt[cpu].user_regs);
462 memcpy(data, &pt, sizeof(struct gdb_regs));
463 break;
465 case PTRACE_GETFPREGS:
466 case PTRACE_GETFPXREGS:
467 if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
468 goto out_error;
469 memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
470 break;
472 case PTRACE_SETREGS:
473 if (!current_isfile)
474 goto out_unspported; /* XXX not yet supported */
475 SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
476 if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
477 &ctxt[cpu])))
478 goto out_error_dom0;
479 break;
481 case PTRACE_SINGLESTEP:
482 if (!current_isfile)
483 goto out_unspported; /* XXX not yet supported */
484 /* XXX we can still have problems if the user switches threads
485 * during single-stepping - but that just seems retarded
486 */
487 ctxt[cpu].user_regs.eflags |= PSL_T;
488 if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
489 &ctxt[cpu])))
490 goto out_error_dom0;
491 /* FALLTHROUGH */
493 case PTRACE_CONT:
494 case PTRACE_DETACH:
495 if (!current_isfile)
496 goto out_unspported; /* XXX not yet supported */
497 if ( request != PTRACE_SINGLESTEP )
498 {
499 FOREACH_CPU(cpumap, index) {
500 cpu = index - 1;
501 if (fetch_regs(xc_handle, cpu, NULL))
502 goto out_error;
503 /* Clear trace flag */
504 if ( ctxt[cpu].user_regs.eflags & PSL_T )
505 {
506 ctxt[cpu].user_regs.eflags &= ~PSL_T;
507 if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
508 cpu, &ctxt[cpu])))
509 goto out_error_dom0;
510 }
511 }
512 }
513 if ( request == PTRACE_DETACH )
514 {
515 op.cmd = DOM0_SETDEBUGGING;
516 op.u.setdebugging.domain = current_domid;
517 op.u.setdebugging.enable = 0;
518 if ((retval = do_dom0_op(xc_handle, &op)))
519 goto out_error_dom0;
520 }
521 regs_valid = 0;
522 if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
523 current_domid : -current_domid)))
524 goto out_error_dom0;
525 break;
527 case PTRACE_ATTACH:
528 current_domid = domid_tid;
529 current_isfile = (int)edata;
530 if (current_isfile)
531 break;
532 op.cmd = DOM0_GETDOMAININFO;
533 op.u.getdomaininfo.domain = current_domid;
534 retval = do_dom0_op(xc_handle, &op);
535 if ( retval || (op.u.getdomaininfo.domain != current_domid) )
536 goto out_error_dom0;
537 if ( op.u.getdomaininfo.flags & DOMFLAGS_PAUSED )
538 printf("domain currently paused\n");
539 else if ((retval = xc_domain_pause(xc_handle, current_domid)))
540 goto out_error_dom0;
541 op.cmd = DOM0_SETDEBUGGING;
542 op.u.setdebugging.domain = current_domid;
543 op.u.setdebugging.enable = 1;
544 if ((retval = do_dom0_op(xc_handle, &op)))
545 goto out_error_dom0;
547 if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
548 printf("get_online_cpumap failed\n");
549 if (online_cpumap != cpumap)
550 online_vcpus_changed(cpumap);
551 break;
553 case PTRACE_SETFPREGS:
554 case PTRACE_SETFPXREGS:
555 case PTRACE_PEEKUSER:
556 case PTRACE_POKEUSER:
557 case PTRACE_SYSCALL:
558 case PTRACE_KILL:
559 goto out_unspported; /* XXX not yet supported */
561 case PTRACE_TRACEME:
562 printf("PTRACE_TRACEME is an invalid request under Xen\n");
563 goto out_error;
564 }
566 return retval;
568 out_error_dom0:
569 perror("dom0 op failed");
570 out_error:
571 errno = EINVAL;
572 return retval;
574 out_unspported:
575 #ifdef DEBUG
576 printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
577 #endif
578 errno = ENOSYS;
579 return -1;
581 }
583 int
584 xc_waitdomain(
585 int xc_handle,
586 int domain,
587 int *status,
588 int options)
589 {
590 if (current_isfile)
591 return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
592 return __xc_waitdomain(xc_handle, domain, status, options);
593 }
595 /*
596 * Local variables:
597 * mode: C
598 * c-set-style: "BSD"
599 * c-basic-offset: 4
600 * tab-width: 4
601 * indent-tabs-mode: nil
602 * End:
603 */