ia64/xen-unstable

view tools/libxc/xc_ptrace_core.c @ 8964:8946b6dcd49e

Fix x86_64 Xen build.

event_callback_cs and failsafe_callback_cs are x86_32 only.

Signed-off-by: Ian Campbell <Ian.Campbell@XenSource.com>
author Ian.Campbell@xensource.com
date Wed Feb 22 17:26:39 2006 +0000 (2006-02-22)
parents f1b361b05bf3
children 26eff2448966
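The change itself lives in other files; this page only shows xc_ptrace_core.c at that revision. As a rough, hedged sketch of what "x86_32 only" means in practice, any code touching those selector fields has to be compiled out on x86_64, along the following lines. The helper name and the FLAT_KERNEL_CS value are illustrative assumptions, not code from this changeset:

/* Illustrative sketch only -- not part of this file or changeset.
 * In the Xen public headers the x86_32 vcpu_guest_context_t carries
 * event_callback_cs/failsafe_callback_cs selector fields; the x86_64
 * layout does not, so any reference must be guarded by architecture. */
#include <xenctrl.h>

static void set_callback_selectors(vcpu_guest_context_t *ctxt)
{
#if defined(__i386__)
    ctxt->event_callback_cs    = FLAT_KERNEL_CS;   /* assumed value */
    ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
#else
    (void)ctxt;                 /* fields do not exist on x86_64 */
#endif
}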
line source
#include <sys/ptrace.h>
#include <sys/wait.h>
#include "xc_private.h"
#include <time.h>

#define BSD_PAGE_MASK (PAGE_SIZE-1)
#define PDRSHIFT 22
#define VCPU 0 /* XXX */

/*
 * long
 * ptrace(enum __ptrace_request request, pid_t pid, void *addr, void *data);
 */

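/*
 * Register block handed back for PTRACE_GETREGS.  The field order and
 * the byte offsets noted below follow the i386 user_regs_struct layout
 * that gdb expects from ptrace.
 */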
struct gdb_regs {
    long ebx; /* 0 */
    long ecx; /* 4 */
    long edx; /* 8 */
    long esi; /* 12 */
    long edi; /* 16 */
    long ebp; /* 20 */
    long eax; /* 24 */
    int  xds; /* 28 */
    int  xes; /* 32 */
    int  xfs; /* 36 */
    int  xgs; /* 40 */
    long orig_eax; /* 44 */
    long eip;    /* 48 */
    int  xcs;    /* 52 */
    long eflags; /* 56 */
    long esp;    /* 60 */
    int  xss;    /* 64 */
};

#define printval(x) printf("%s = %lx\n", #x, (long)x);
#define SET_PT_REGS(pt, xc)                     \
{                                               \
    pt.ebx = xc.ebx;                            \
    pt.ecx = xc.ecx;                            \
    pt.edx = xc.edx;                            \
    pt.esi = xc.esi;                            \
    pt.edi = xc.edi;                            \
    pt.ebp = xc.ebp;                            \
    pt.eax = xc.eax;                            \
    pt.eip = xc.eip;                            \
    pt.xcs = xc.cs;                             \
    pt.eflags = xc.eflags;                      \
    pt.esp = xc.esp;                            \
    pt.xss = xc.ss;                             \
    pt.xes = xc.es;                             \
    pt.xds = xc.ds;                             \
    pt.xfs = xc.fs;                             \
    pt.xgs = xc.gs;                             \
}

#define SET_XC_REGS(pt, xc)                     \
{                                               \
    xc.ebx = pt->ebx;                           \
    xc.ecx = pt->ecx;                           \
    xc.edx = pt->edx;                           \
    xc.esi = pt->esi;                           \
    xc.edi = pt->edi;                           \
    xc.ebp = pt->ebp;                           \
    xc.eax = pt->eax;                           \
    xc.eip = pt->eip;                           \
    xc.cs = pt->xcs;                            \
    xc.eflags = pt->eflags;                     \
    xc.esp = pt->esp;                           \
    xc.ss = pt->xss;                            \
    xc.es = pt->xes;                            \
    xc.ds = pt->xds;                            \
    xc.fs = pt->xfs;                            \
    xc.gs = pt->xgs;                            \
}

#define vtopdi(va) ((va) >> PDRSHIFT)
#define vtopti(va) (((va) >> PAGE_SHIFT) & 0x3ff)

/* XXX application state */

static long nr_pages = 0;
static unsigned long *p2m_array = NULL;
static unsigned long *m2p_array = NULL;
static unsigned long pages_offset;
static unsigned long cr3[MAX_VIRT_CPUS];
static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];

/* --------------------- */

static unsigned long
map_mtop_offset(unsigned long ma)
{
    return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT);
}

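/*
 * Translate a guest virtual address into a pointer inside the mapped
 * core image: walk the two-level x86_32 page table rooted at the vcpu's
 * saved cr3, mmap()ing the page directory, page table and data page
 * from the core file on demand and caching one of each per vcpu.  For
 * HVM guests the table entries are pseudo-physical and are converted
 * through p2m_array first.
 */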
static void *
map_domain_va(unsigned long domfd, int cpu, void * guest_va)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;
    void *v;

    static unsigned long  cr3_phys[MAX_VIRT_CPUS];
    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
    static unsigned long  pde_phys[MAX_VIRT_CPUS];
    static unsigned long *pde_virt[MAX_VIRT_CPUS];
    static unsigned long  page_phys[MAX_VIRT_CPUS];
    static unsigned long *page_virt[MAX_VIRT_CPUS];

    if (cr3[cpu] != cr3_phys[cpu])
    {
        cr3_phys[cpu] = cr3[cpu];
        if (cr3_virt[cpu])
            munmap(cr3_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset(cr3_phys[cpu]));
        if (v == MAP_FAILED)
        {
            perror("mmap failed");
            goto error_out;
        }
        cr3_virt[cpu] = v;
    }
    if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
        goto error_out;
    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
        pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
    if (pde != pde_phys[cpu])
    {
        pde_phys[cpu] = pde;
        if (pde_virt[cpu])
            munmap(pde_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset(pde_phys[cpu]));
        if (v == MAP_FAILED)
            goto error_out;
        pde_virt[cpu] = v;
    }
    if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
        goto error_out;
    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
        page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
    if (page != page_phys[cpu])
    {
        page_phys[cpu] = page;
        if (page_virt[cpu])
            munmap(page_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset(page_phys[cpu]));
        if (v == MAP_FAILED) {
            printf("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, vtopti(va));
            page_phys[cpu] = 0;
            goto error_out;
        }
        page_virt[cpu] = v;
    }
    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));

 error_out:
    return 0;
}

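/*
 * Parse the core image on first use: an xc_core_header_t, followed by
 * the vcpu_guest_context_t array, the P2M table, and the page data at
 * xch_pages_offset.  Also builds the reverse M2P table consumed by
 * map_mtop_offset() above.
 */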
int
xc_waitdomain_core(
    int xc_handle,
    int domfd,
    int *status,
    int options)
{
    int retval = -1;
    int nr_vcpus;
    int i;
    xc_core_header_t header;

    if (nr_pages == 0) {

        if (read(domfd, &header, sizeof(header)) != sizeof(header))
            return -1;

        nr_pages = header.xch_nr_pages;
        nr_vcpus = header.xch_nr_vcpus;
        pages_offset = header.xch_pages_offset;

        if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
            sizeof(vcpu_guest_context_t)*nr_vcpus)
            return -1;

        for (i = 0; i < nr_vcpus; i++) {
            cr3[i] = ctxt[i].ctrlreg[3];
        }
        if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
            printf("Could not allocate p2m_array\n");
            goto error_out;
        }
        if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
            sizeof(unsigned long)*nr_pages)
            return -1;

        if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL) {
            printf("Could not allocate m2p array\n");
            goto error_out;
        }
        bzero(m2p_array, sizeof(unsigned long)* 1 << 20);

        for (i = 0; i < nr_pages; i++) {
            m2p_array[p2m_array[i]] = i;
        }

    }
    retval = 0;
 error_out:
    return retval;

}

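/*
 * Serve the subset of ptrace requests that make sense against a core
 * image: peek/poke requests go through map_domain_va(), register reads
 * come from the saved vcpu contexts, and requests that would need a
 * live domain fail with ENOSYS.
 */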
long
xc_ptrace_core(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domfd,
    long eaddr,
    long edata)
{
    int status = 0;
    struct gdb_regs pt;
    long retval = 0;
    unsigned long *guest_va;
    int cpu = VCPU;
    void *addr = (char *)eaddr;
    void *data = (char *)edata;

#if 0
    printf("%20s %d, %p, %p \n", ptrace_names[request], domid, addr, data);
#endif
    switch (request) {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:
        if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) {
            status = EFAULT;
            goto error_out;
        }

        retval = *guest_va;
        break;
    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
        if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) {
            status = EFAULT;
            goto error_out;
        }
        *guest_va = (unsigned long)data;
        break;
    case PTRACE_GETREGS:
    case PTRACE_GETFPREGS:
    case PTRACE_GETFPXREGS:
        if (request == PTRACE_GETREGS) {
            SET_PT_REGS(pt, ctxt[cpu].user_regs);
            memcpy(data, &pt, sizeof(struct gdb_regs));
        } else if (request == PTRACE_GETFPREGS)
            memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
        else /*if (request == PTRACE_GETFPXREGS)*/
            memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
        break;
    case PTRACE_ATTACH:
        retval = 0;
        break;
    case PTRACE_SETREGS:
    case PTRACE_SINGLESTEP:
    case PTRACE_CONT:
    case PTRACE_DETACH:
    case PTRACE_SETFPREGS:
    case PTRACE_SETFPXREGS:
    case PTRACE_PEEKUSER:
    case PTRACE_POKEUSER:
    case PTRACE_SYSCALL:
    case PTRACE_KILL:
#ifdef DEBUG
        printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
#endif
        status = ENOSYS;
        break;
    case PTRACE_TRACEME:
        printf("PTRACE_TRACEME is an invalid request under Xen\n");
        status = EINVAL;
    }

    if (status) {
        errno = status;
        retval = -1;
    }
 error_out:
    return retval;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */