ia64/xen-unstable

changeset 12837: `aab2b3f739d2` (Merge)

| | |
|---|---|
| author | Alastair Tse <atse@xensource.com> |
| date | Thu Dec 07 11:44:05 2006 +0000 (2006-12-07) |
| parents | f7cff5d296bf, 2dd4569e0640 |
| children | 8e035701b9ff |
| files | tools/firmware/vmxassist/head.S, tools/firmware/vmxassist/setup.c, tools/firmware/vmxassist/vm86.c, tools/ioemu/hw/tpm_tis.c, tools/ioemu/target-i386-dm/cpu.h, tools/ioemu/target-i386-dm/exec-dm.c, tools/ioemu/vl.c, tools/ioemu/vl.h, tools/libxc/xc_hvm_build.c, tools/libxc/xc_linux_build.c, tools/libxc/xc_load_elf.c, tools/libxc/xc_private.c, tools/libxc/xc_private.h, tools/libxc/xenctrl.h, tools/python/xen/lowlevel/xc/xc.c, tools/python/xen/xend/server/blkif.py, xen/arch/x86/boot/x86_32.S, xen/arch/x86/boot/x86_64.S, xen/include/xen/multiboot.h |

line diff
```diff
--- a/tools/firmware/vmxassist/head.S	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/firmware/vmxassist/head.S	Thu Dec 07 11:44:05 2006 +0000
@@ -130,7 +130,7 @@ 1:
     clts
 
     /* setup my own stack */
-    movl $stack_top - 4*4, %esp
+    movl $stack_top, %esp
     movl %esp, %ebp
 
     /* go ... */
```
```diff
--- a/tools/firmware/vmxassist/setup.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/firmware/vmxassist/setup.c	Thu Dec 07 11:44:05 2006 +0000
@@ -125,7 +125,7 @@ setup_gdt(void)
     /* setup task state segment */
     memset(&tss, 0, sizeof(tss));
     tss.ss0 = DATA_SELECTOR;
-    tss.esp0 = (unsigned) stack_top - 4*4;
+    tss.esp0 = (unsigned) stack_top;
     tss.iomap_base = offsetof(struct tss, iomap);
 
     /* initialize gdt's tss selector */
@@ -258,7 +258,7 @@ setup_ctx(void)
 
     memset(c, 0, sizeof(*c));
     c->eip = (unsigned long) switch_to_real_mode;
-    c->esp = (unsigned) stack_top - 4*4;
+    c->esp = (unsigned) stack_top;
     c->eflags = 0x2; /* no interrupts, please */
 
     /*
```
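The head.S and setup.c hunks change the same value in lockstep: the boot-time %esp, the ring-0 stack pointer in the TSS, and the %esp in the saved context used to switch back to real mode must all name the same top of stack, and the old `- 4*4` slack is dropped everywhere at once. A minimal sketch of that invariant (`stack_top` is the symbol head.S defines; the checking function is hypothetical):

```c
#include <assert.h>

extern unsigned char stack_top[];   /* the symbol defined in head.S */

/* Hypothetical consistency check: after this change every consumer uses
 * the raw top of stack; the old code left 4*4 bytes of slack below it. */
static void check_stack_consistency(unsigned long tss_esp0, unsigned long ctx_esp)
{
    unsigned long top = (unsigned long)stack_top;

    assert(tss_esp0 == top);   /* setup_gdt(): tss.esp0 */
    assert(ctx_esp  == top);   /* setup_ctx(): c->esp   */
}
```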
(Most of the paired -/+ lines below are whitespace-only cleanup.)

```diff
--- a/tools/firmware/vmxassist/vm86.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/firmware/vmxassist/vm86.c	Thu Dec 07 11:44:05 2006 +0000
@@ -1,6 +1,6 @@
 /*
  * vm86.c: A vm86 emulator. The main purpose of this emulator is to do as
- * little work as possible.
+ * little work as possible.
  *
  * Leendert van Doorn, leendert@watson.ibm.com
  * Copyright (c) 2005-2006, International Business Machines Corporation.
@@ -52,8 +52,8 @@ char *states[] = {
 static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
 #endif /* DEBUG */
 
-#define PDE_PS (1 << 7)
-#define PT_ENTRY_PRESENT 0x1
+#define PDE_PS (1 << 7)
+#define PT_ENTRY_PRESENT 0x1
 
 /* We only support access to <=4G physical memory due to 1:1 mapping */
 static uint64_t
@@ -136,7 +136,7 @@ address(struct regs *regs, unsigned seg,
     }
 
     if (mode == VM86_REAL || seg > oldctx.gdtr_limit ||
-        (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
+        (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
         return ((seg & 0xFFFF) << 4) + off;
 
     gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
@@ -153,13 +153,13 @@ address(struct regs *regs, unsigned seg,
     seg_limit = (entry_high & 0xF0000) | (entry_low & 0xFFFF);
 
     if (entry_high & 0x8000 &&
-        ((entry_high & 0x800000 && off >> 12 <= seg_limit) ||
-         (!(entry_high & 0x800000) && off <= seg_limit)))
+        ((entry_high & 0x800000 && off >> 12 <= seg_limit) ||
+         (!(entry_high & 0x800000) && off <= seg_limit)))
         return seg_base + off;
 
     panic("should never reach here in function address():\n\t"
-          "entry=0x%08x%08x, mode=%d, seg=0x%08x, offset=0x%08x\n",
-          entry_high, entry_low, mode, seg, off);
+          "entry=0x%08x%08x, mode=%d, seg=0x%08x, offset=0x%08x\n",
+          entry_high, entry_low, mode, seg, off);
 
     return 0;
 }
@@ -172,7 +172,7 @@ trace(struct regs *regs, int adjust, cha
     va_list ap;
 
     if ((traceset & (1 << mode)) &&
-        (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL)) {
+        (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL)) {
         /* 16-bit, seg:off addressing */
         unsigned addr = address(regs, regs->cs, off);
         printf("0x%08x: 0x%x:0x%04x ", addr, regs->cs, off);
@@ -183,7 +183,7 @@ trace(struct regs *regs, int adjust, cha
         printf("\n");
     }
     if ((traceset & (1 << mode)) &&
-        (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED)) {
+        (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED)) {
         /* 16-bit, gdt addressing */
         unsigned addr = address(regs, regs->cs, off);
         printf("0x%08x: 0x%x:0x%08x ", addr, regs->cs, off);
@@ -430,7 +430,7 @@ operand(unsigned prefix, struct regs *re
         case 2: return address(regs, seg, regs->edx);
         case 3: return address(regs, seg, regs->ebx);
         case 4: return address(regs, seg,
-                               sib(regs, mod, fetch8(regs)));
+                               sib(regs, mod, fetch8(regs)));
         case 5: return address(regs, seg, fetch32(regs));
         case 6: return address(regs, seg, regs->esi);
         case 7: return address(regs, seg, regs->edi);
@@ -450,7 +450,7 @@ operand(unsigned prefix, struct regs *re
         case 2: return address(regs, seg, regs->edx + disp);
         case 3: return address(regs, seg, regs->ebx + disp);
         case 4: return address(regs, seg,
-                               sib(regs, mod, fetch8(regs)));
+                               sib(regs, mod, fetch8(regs)));
         case 5: return address(regs, seg, regs->ebp + disp);
         case 6: return address(regs, seg, regs->esi + disp);
         case 7: return address(regs, seg, regs->edi + disp);
@@ -507,7 +507,7 @@ operand(unsigned prefix, struct regs *re
         }
     }
 
-    return 0;
+    return 0;
 }
 
 /*
@@ -859,7 +859,7 @@ mov_to_seg(struct regs *regs, unsigned p
 
 fail:
     printf("%s:%d: missed opcode %02x %02x\n",
-           __FUNCTION__, __LINE__, opc, modrm);
+           __FUNCTION__, __LINE__, opc, modrm);
     return 0;
 }
 
@@ -896,11 +896,11 @@ load_seg(unsigned long sel, uint32_t *ba
              ((entry >> (32-16)) & 0x00FF0000) |
              ((entry >> ( 16)) & 0x0000FFFF));
     *limit = (((entry >> (48-16)) & 0x000F0000) |
-              ((entry ) & 0x0000FFFF));
+              (entry & 0x0000FFFF));
 
     arbytes->bytes = 0;
     arbytes->fields.seg_type = (entry >> (8+32)) & 0xF; /* TYPE */
-    arbytes->fields.s = (entry >> (12+32)) & 0x1; /* S */
+    arbytes->fields.s = (entry >> (12+32)) & 0x1; /* S */
     if (arbytes->fields.s)
         arbytes->fields.seg_type |= 1; /* accessed */
     arbytes->fields.dpl = (entry >> (13+32)) & 0x3; /* DPL */
@@ -924,7 +924,7 @@ static void
 load_or_clear_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
 {
     if (!load_seg(sel, base, limit, arbytes))
-        load_seg(0, base, limit, arbytes);
+        load_seg(0, base, limit, arbytes);
 }
 
 
@@ -988,21 +988,21 @@ real_mode(struct regs *regs)
             panic("%%ss 0x%lx higher than 1MB", regs->uss);
         regs->uss = address(regs, regs->uss, 0) >> 4;
     } else {
-        regs->uss = saved_rm_regs.uss;
+        regs->uss = saved_rm_regs.uss;
     }
     if (regs->vds != 0) {
         if (regs->vds >= HIGHMEM)
             panic("%%ds 0x%lx higher than 1MB", regs->vds);
         regs->vds = address(regs, regs->vds, 0) >> 4;
     } else {
-        regs->vds = saved_rm_regs.vds;
+        regs->vds = saved_rm_regs.vds;
     }
     if (regs->ves != 0) {
         if (regs->ves >= HIGHMEM)
             panic("%%es 0x%lx higher than 1MB", regs->ves);
         regs->ves = address(regs, regs->ves, 0) >> 4;
     } else {
-        regs->ves = saved_rm_regs.ves;
+        regs->ves = saved_rm_regs.ves;
     }
 
     /* this should get us into 16-bit mode */
@@ -1029,10 +1029,7 @@ set_mode(struct regs *regs, enum vm86_mo
             (mode == VM86_REAL_TO_PROTECTED)) {
             regs->eflags &= ~EFLAGS_TF;
             real_mode(regs);
-            break;
-        } else if (mode == VM86_REAL) {
-            break;
-        } else
+        } else if (mode != VM86_REAL)
             panic("unexpected real mode transition");
         break;
 
@@ -1049,25 +1046,19 @@ set_mode(struct regs *regs, enum vm86_mo
             oldctx.fs_sel = 0;
             oldctx.gs_sel = 0;
             oldctx.ss_sel = 0;
-            break;
-        } else if (mode == VM86_REAL_TO_PROTECTED) {
-            break;
-        } else
+        } else if (mode != VM86_REAL_TO_PROTECTED)
            panic("unexpected real-to-protected mode transition");
        break;
 
     case VM86_PROTECTED_TO_REAL:
-        if (mode == VM86_PROTECTED) {
-            break;
-        } else
+        if (mode != VM86_PROTECTED)
            panic("unexpected protected-to-real mode transition");
        break;
 
     case VM86_PROTECTED:
-        if (mode == VM86_REAL_TO_PROTECTED) {
-            protected_mode(regs);
-        } else
+        if (mode != VM86_REAL_TO_PROTECTED)
            panic("unexpected protected mode transition");
+        protected_mode(regs);
        break;
     }
 
@@ -1081,25 +1072,19 @@ jmpl(struct regs *regs, int prefix)
     unsigned n = regs->eip;
     unsigned cs, eip;
 
-    if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
-        eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
-        cs = fetch16(regs);
+    eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
+    cs = fetch16(regs);
+
+    TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
 
-        TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
+    regs->cs = cs;
+    regs->eip = eip;
 
-        regs->cs = cs;
-        regs->eip = eip;
+    if (mode == VM86_REAL_TO_PROTECTED)      /* jump to protected mode */
         set_mode(regs, VM86_PROTECTED);
-    } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
-        eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
-        cs = fetch16(regs);
-
-        TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
-
-        regs->cs = cs;
-        regs->eip = eip;
+    else if (mode == VM86_PROTECTED_TO_REAL) /* jump to real mode */
         set_mode(regs, VM86_REAL);
-    } else
+    else
         panic("jmpl");
 }
 
@@ -1110,29 +1095,22 @@ jmpl_indirect(struct regs *regs, int pre
     unsigned cs, eip;
     unsigned addr;
 
-    addr = operand(prefix, regs, modrm);
+    addr = operand(prefix, regs, modrm);
+
+    eip = (prefix & DATA32) ? read32(addr) : read16(addr);
+    addr += (prefix & DATA32) ? 4 : 2;
+    cs = read16(addr);
 
-    if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
-        eip = (prefix & DATA32) ? read32(addr) : read16(addr);
-        addr += (prefix & DATA32) ? 4 : 2;
-        cs = read16(addr);
+    TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
 
-        TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
+    regs->cs = cs;
+    regs->eip = eip;
 
-        regs->cs = cs;
-        regs->eip = eip;
+    if (mode == VM86_REAL_TO_PROTECTED)      /* jump to protected mode */
         set_mode(regs, VM86_PROTECTED);
-    } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
-        eip = (prefix & DATA32) ? read32(addr) : read16(addr);
-        addr += (prefix & DATA32) ? 4 : 2;
-        cs = read16(addr);
-
-        TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
-
-        regs->cs = cs;
-        regs->eip = eip;
+    else if (mode == VM86_PROTECTED_TO_REAL) /* jump to real mode */
         set_mode(regs, VM86_REAL);
-    } else
+    else
         panic("jmpl");
 }
 
@@ -1151,15 +1129,14 @@ retl(struct regs *regs, int prefix)
 
     TRACE((regs, 1, "retl (to 0x%x:0x%x)", cs, eip));
 
-    if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
-        regs->cs = cs;
-        regs->eip = eip;
+    regs->cs = cs;
+    regs->eip = eip;
+
+    if (mode == VM86_REAL_TO_PROTECTED)      /* jump to protected mode */
         set_mode(regs, VM86_PROTECTED);
-    } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
-        regs->cs = cs;
-        regs->eip = eip;
+    else if (mode == VM86_PROTECTED_TO_REAL) /* jump to real mode */
         set_mode(regs, VM86_REAL);
-    } else
+    else
         panic("retl");
 }
 
@@ -1259,8 +1236,8 @@ pushrm(struct regs *regs, int prefix, un
     unsigned addr;
     unsigned data;
 
-    addr = operand(prefix, regs, modrm);
-
+    addr = operand(prefix, regs, modrm);
+
     if (prefix & DATA32) {
         data = read32(addr);
         push32(regs, data);
@@ -1386,11 +1363,11 @@ opcode(struct regs *regs)
     case 0x3B: /* addr32 cmp r/m16, r16 */
         if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
             goto invalid;
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!cmp(regs, prefix, opc))
-            goto invalid;
-        return OPC_EMULATED;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!cmp(regs, prefix, opc))
+            goto invalid;
+        return OPC_EMULATED;
 
     case 0x3E:
         TRACE((regs, regs->eip - eip, "%%ds:"));
@@ -1412,7 +1389,7 @@ opcode(struct regs *regs)
         prefix |= DATA32;
         continue;
 
-    case 0x67:
+    case 0x67:
         TRACE((regs, regs->eip - eip, "addr32"));
         prefix |= ADDR32;
         continue;
@@ -1421,18 +1398,18 @@ opcode(struct regs *regs)
     case 0x8A: /* addr32 mov r/m8, r8 */
         if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
             goto invalid;
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!movr(regs, prefix, opc))
-            goto invalid;
-        return OPC_EMULATED;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!movr(regs, prefix, opc))
+            goto invalid;
+        return OPC_EMULATED;
 
     case 0x89: /* addr32 mov r16, r/m16 */
         if (mode == VM86_PROTECTED_TO_REAL) {
             unsigned modrm = fetch8(regs);
             unsigned addr = operand(prefix, regs, modrm);
             unsigned val, r = (modrm >> 3) & 7;
-
+
             if (prefix & DATA32) {
                 val = getreg16(regs, r);
                 write32(addr, val);
@@ -1447,11 +1424,11 @@ opcode(struct regs *regs)
     case 0x8B: /* addr32 mov r/m16, r16 */
         if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
             goto invalid;
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!movr(regs, prefix, opc))
-            goto invalid;
-        return OPC_EMULATED;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!movr(regs, prefix, opc))
+            goto invalid;
+        return OPC_EMULATED;
 
     case 0x8E: /* mov r16, sreg */
         if (!mov_to_seg(regs, prefix, opc))
@@ -1459,11 +1436,11 @@ opcode(struct regs *regs)
         return OPC_EMULATED;
 
     case 0x8F: /* addr32 pop r/m16 */
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!pop(regs, prefix, opc))
-            goto invalid;
-        return OPC_EMULATED;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!pop(regs, prefix, opc))
+            goto invalid;
+        return OPC_EMULATED;
 
     case 0x90: /* nop */
         TRACE((regs, regs->eip - eip, "nop"));
@@ -1487,7 +1464,7 @@ opcode(struct regs *regs)
         regs->eflags |= EFLAGS_VM;
         return OPC_EMULATED;
 
-    case 0xA1: /* mov ax, r/m16 */
+    case 0xA1: /* mov ax, r/m16 */
     {
         int addr, data;
         int seg = segment(prefix, regs, regs->vds);
@@ -1521,15 +1498,15 @@ opcode(struct regs *regs)
         return OPC_EMULATED;
 
     case 0xC6: /* addr32 movb $imm, r/m8 */
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!movr(regs, prefix, opc))
-            goto invalid;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!movr(regs, prefix, opc))
+            goto invalid;
         return OPC_EMULATED;
 
     case 0xCB: /* retl */
         if ((mode == VM86_REAL_TO_PROTECTED) ||
-            (mode == VM86_PROTECTED_TO_REAL)) {
+            (mode == VM86_PROTECTED_TO_REAL)) {
             retl(regs, prefix);
             return OPC_INVALID;
         }
@@ -1567,7 +1544,7 @@ opcode(struct regs *regs)
 
     case 0xEA: /* jmpl */
         if ((mode == VM86_REAL_TO_PROTECTED) ||
-            (mode == VM86_PROTECTED_TO_REAL)) {
+            (mode == VM86_PROTECTED_TO_REAL)) {
             jmpl(regs, prefix);
             return OPC_INVALID;
         }
@@ -1579,7 +1556,7 @@ opcode(struct regs *regs)
         switch((modrm >> 3) & 7) {
         case 5: /* jmpl (indirect) */
             if ((mode == VM86_REAL_TO_PROTECTED) ||
-                (mode == VM86_PROTECTED_TO_REAL)) {
+                (mode == VM86_PROTECTED_TO_REAL)) {
                 jmpl_indirect(regs, prefix, modrm);
                 return OPC_INVALID;
             }
@@ -1596,7 +1573,7 @@ opcode(struct regs *regs)
 
     case 0xEB: /* short jump */
         if ((mode == VM86_REAL_TO_PROTECTED) ||
-            (mode == VM86_PROTECTED_TO_REAL)) {
+            (mode == VM86_PROTECTED_TO_REAL)) {
             disp = (char) fetch8(regs);
             TRACE((regs, 2, "jmp 0x%x", regs->eip + disp));
             regs->eip += disp;
@@ -1619,10 +1596,10 @@ opcode(struct regs *regs)
         continue;
 
     case 0xF6: /* addr32 testb $imm, r/m8 */
-        if ((prefix & ADDR32) == 0)
-            goto invalid;
-        if (!test(regs, prefix, opc))
-            goto invalid;
+        if ((prefix & ADDR32) == 0)
+            goto invalid;
+        if (!test(regs, prefix, opc))
+            goto invalid;
         return OPC_EMULATED;
 
     case 0xFA: /* cli */
@@ -1682,6 +1659,8 @@ trap(int trapno, int errno, struct regs 
     case 1: /* Debug */
         if (regs->eflags & EFLAGS_VM) {
             /* emulate any 8086 instructions */
+            if (mode == VM86_REAL)
+                return;
             if (mode != VM86_REAL_TO_PROTECTED)
                 panic("not in real-to-protected mode");
             emulate(regs);
@@ -1702,7 +1681,7 @@ trap(int trapno, int errno, struct regs 
     default:
     invalid:
         printf("Trap (0x%x) while in %s mode\n",
-               trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
+               trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
         if (trapno == 14)
             printf("Page fault address 0x%x\n", get_cr2());
         dump_regs(regs);
```
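Beyond the whitespace cleanup, the jmpl/jmpl_indirect/retl hunks share one real refactor: the target cs:eip is now fetched and committed once, before the mode check, instead of being duplicated in each branch. A condensed sketch of the resulting shape (the types and helpers are stand-ins for vm86.c's own definitions, not the literal code):

```c
/* Condensed sketch of the refactored far-transfer tail shared by
 * jmpl(), jmpl_indirect() and retl(). */
enum vm86_mode { VM86_REAL, VM86_REAL_TO_PROTECTED,
                 VM86_PROTECTED_TO_REAL, VM86_PROTECTED };

struct regs { unsigned cs, eip; /* ... */ };

extern enum vm86_mode mode;   /* vm86.c keeps this at file scope */
extern void set_mode(struct regs *regs, enum vm86_mode newmode);
extern void panic(const char *fmt, ...);

static void far_transfer_tail(struct regs *regs, unsigned cs, unsigned eip)
{
    /* cs:eip is committed unconditionally now ... */
    regs->cs  = cs;
    regs->eip = eip;

    /* ... and only the world switch remains mode-dependent. */
    if (mode == VM86_REAL_TO_PROTECTED)
        set_mode(regs, VM86_PROTECTED);
    else if (mode == VM86_PROTECTED_TO_REAL)
        set_mode(regs, VM86_REAL);
    else
        panic("jmpl");
}
```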
```diff
--- a/tools/ioemu/hw/tpm_tis.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/ioemu/hw/tpm_tis.c	Thu Dec 07 11:44:05 2006 +0000
@@ -132,7 +132,7 @@ typedef struct TPMState {
 
 
 /* local prototypes */
-static int TPM_Send(tpmState *s, tpmBuffer *buffer, char *msg);
+static int TPM_Send(tpmState *s, tpmBuffer *buffer, uint8_t locty, char *msg);
 static int TPM_Receive(tpmState *s, tpmBuffer *buffer);
 static uint32_t vtpm_instance_from_xenstore(void);
 static void tis_poll_timer(void *opaque);
@@ -271,6 +271,8 @@ static int create_local_socket(tpmState 
 /*
  * the 'write' method for sending requests to the vTPM
  * four bytes with the vTPM instance number are prepended to each request
+ * the locality in which the command was sent is transmitted in the
+ * highest 3 bits
  */
 static int write_local_socket(tpmState *s, const tpmBuffer *buffer)
 {
@@ -608,7 +610,7 @@ static void tis_mem_writel(void *opaque,
         }
     }
     if (val & STS_TPM_GO) {
-        n = TPM_Send(s, &s->buffer,"tpm_data_write");
+        n = TPM_Send(s, &s->buffer, locty, "tpm_data_write");
         if (n > 0) {
             /* sending of data was successful */
             s->offset = 0;
@@ -915,7 +917,7 @@ const static unsigned char tpm_failure[]
 /*
  * Send a TPM request.
  */
-static int TPM_Send(tpmState *s, tpmBuffer *buffer, char *msg)
+static int TPM_Send(tpmState *s, tpmBuffer *buffer, uint8_t locty, char *msg)
 {
     int len;
     uint32_t size = tpm_get_size_from_buffer(buffer->buf);
@@ -945,6 +947,10 @@ static int TPM_Send(tpmState *s, tpmBuff
     showBuff(buffer->buf, "To TPM");
 #endif
 
+    /* transmit the locality in the highest 3 bits */
+    buffer->instance[0] &= 0x1f;
+    buffer->instance[0] |= (locty << 5);
+
     len = vTPMTransmit[s->Transmitlayer].write(s, buffer);
     if (len < 0) {
         s->Transmitlayer = -1;
```
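The encoding above packs the TPM locality into bits the instance number does not need: the low 5 bits of the first instance byte keep their old meaning, and the high 3 bits carry the locality (0 through 7) the command was issued from. A small sketch of both directions (the decode side is an assumption about what the vTPM backend would do; it is not shown in this diff):

```c
#include <stdint.h>

/* Encode: mirrors TPM_Send() above - keep the low 5 instance bits,
 * put the locality in the top 3. */
static inline uint8_t tpm_encode_locality(uint8_t instance0, uint8_t locty)
{
    return (uint8_t)((instance0 & 0x1f) | (locty << 5));
}

/* Decode: hypothetical receiver side, e.g. in the vTPM backend. */
static inline uint8_t tpm_decode_locality(uint8_t instance0)
{
    return instance0 >> 5;
}
```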
```diff
--- a/tools/ioemu/target-i386-dm/cpu.h	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/ioemu/target-i386-dm/cpu.h	Thu Dec 07 11:44:05 2006 +0000
@@ -25,7 +25,8 @@
 #ifdef TARGET_X86_64
 #define TARGET_LONG_BITS 64
 #else
-#define TARGET_LONG_BITS 32
+/* #define TARGET_LONG_BITS 32 */
+#define TARGET_LONG_BITS 64 /* for Qemu map cache */
 #endif
 
 /* target supports implicit self modifying code */
```
```diff
--- a/tools/ioemu/target-i386-dm/exec-dm.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/ioemu/target-i386-dm/exec-dm.c	Thu Dec 07 11:44:05 2006 +0000
@@ -36,6 +36,7 @@
 
 #include "cpu.h"
 #include "exec-all.h"
+#include "vl.h"
 
 //#define DEBUG_TB_INVALIDATE
 //#define DEBUG_FLUSH
@@ -426,6 +427,12 @@ static inline int paddr_is_ram(target_ph
 #endif
 }
 
+#if defined(__i386__) || defined(__x86_64__)
+#define phys_ram_addr(x) (qemu_map_cache(x))
+#elif defined(__ia64__)
+#define phys_ram_addr(x) (phys_ram_base + (x))
+#endif
+
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
 {
@@ -438,7 +445,7 @@ void cpu_physical_memory_rw(target_phys_
         l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
         if (l > len)
             l = len;
-
+
         io_index = iomem_index(addr);
         if (is_write) {
             if (io_index) {
@@ -460,9 +467,10 @@ void cpu_physical_memory_rw(target_phys_
                 }
             } else if (paddr_is_ram(addr)) {
                 /* Reading from RAM */
-                memcpy(phys_ram_base + addr, buf, l);
+                ptr = phys_ram_addr(addr);
+                memcpy(ptr, buf, l);
 #ifdef __ia64__
-                sync_icache((unsigned long)(phys_ram_base + addr), l);
+                sync_icache(ptr, l);
 #endif
             }
         } else {
@@ -485,7 +493,8 @@ void cpu_physical_memory_rw(target_phys_
                 }
             } else if (paddr_is_ram(addr)) {
                 /* Reading from RAM */
-                memcpy(buf, phys_ram_base + addr, l);
+                ptr = phys_ram_addr(addr);
+                memcpy(buf, ptr, l);
             } else {
                 /* Neither RAM nor known MMIO space */
                 memset(buf, 0xff, len);
```
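The new `phys_ram_addr()` macro is the pivot of this change: on x86 a guest-physical address is no longer a flat offset from `phys_ram_base` but a map-cache lookup that can mmap the containing bucket on demand. A self-contained sketch of the RAM copy path it feeds (the extern declarations are stand-ins for the qemu symbols):

```c
#include <stdint.h>
#include <string.h>

/* Stand-ins for the qemu symbols the macro expands to. */
extern uint8_t *qemu_map_cache(uint64_t phys_addr);
extern uint8_t *phys_ram_base;

#if defined(__i386__) || defined(__x86_64__)
#define phys_ram_addr(x) (qemu_map_cache(x))
#else /* ia64 in the real tree */
#define phys_ram_addr(x) (phys_ram_base + (x))
#endif

/* Sketch of the write-to-guest-RAM branch of cpu_physical_memory_rw(). */
static void copy_to_guest_ram(uint64_t addr, const uint8_t *buf, int len)
{
    uint8_t *ptr = phys_ram_addr(addr);  /* may map a bucket on demand on x86 */
    memcpy(ptr, buf, len);
}
```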
```diff
--- a/tools/ioemu/vl.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/ioemu/vl.c	Thu Dec 07 11:44:05 2006 +0000
@@ -5808,6 +5808,92 @@ int set_mm_mapping(int xc_handle, uint32
     return 0;
 }
 
+#if defined(__i386__) || defined(__x86_64__)
+static struct map_cache *mapcache_entry;
+static unsigned long nr_buckets;
+
+static int qemu_map_cache_init(unsigned long nr_pages)
+{
+    unsigned long max_pages = MAX_MCACHE_SIZE >> PAGE_SHIFT;
+    int i;
+
+    if (nr_pages < max_pages)
+        max_pages = nr_pages;
+
+    nr_buckets = (max_pages << PAGE_SHIFT) >> MCACHE_BUCKET_SHIFT;
+
+    fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx\n", nr_buckets);
+
+    mapcache_entry = malloc(nr_buckets * sizeof(struct map_cache));
+    if (mapcache_entry == NULL) {
+        errno = ENOMEM;
+        return -1;
+    }
+
+    memset(mapcache_entry, 0, nr_buckets * sizeof(struct map_cache));
+
+    /*
+     * To avoid ENOMEM from xc_map_foreign_batch() at runtime, we
+     * pre-fill all the map caches in advance.
+     */
+    for (i = 0; i < nr_buckets; i++)
+        (void)qemu_map_cache(((target_phys_addr_t)i) << MCACHE_BUCKET_SHIFT);
+
+    return 0;
+}
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
+{
+    struct map_cache *entry;
+    unsigned long address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
+    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);
+
+    /* For most cases (>99.9%), the page address is the same. */
+    static unsigned long last_address_index = ~0UL;
+    static uint8_t *last_address_vaddr;
+
+    if (address_index == last_address_index)
+        return last_address_vaddr + address_offset;
+
+    entry = &mapcache_entry[address_index % nr_buckets];
+
+    if (entry->vaddr_base == NULL || entry->paddr_index != address_index)
+    {
+        /* We need to remap a bucket. */
+        uint8_t *vaddr_base;
+        unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
+        unsigned int i;
+
+        if (entry->vaddr_base != NULL) {
+            errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
+            if (errno) {
+                fprintf(logfile, "unmap fails %d\n", errno);
+                exit(-1);
+            }
+        }
+
+        for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
+            pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
+
+        vaddr_base = xc_map_foreign_batch(
+            xc_handle, domid, PROT_READ|PROT_WRITE,
+            pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
+        if (vaddr_base == NULL) {
+            fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
+            exit(-1);
+        }
+
+        entry->vaddr_base = vaddr_base;
+        entry->paddr_index = address_index;;
+    }
+
+    last_address_index = address_index;
+    last_address_vaddr = entry->vaddr_base;
+
+    return last_address_vaddr + address_offset;
+}
+#endif
+
 int main(int argc, char **argv)
 {
 #ifdef CONFIG_GDBSTUB
@@ -6130,6 +6216,7 @@ int main(int argc, char **argv)
             break;
         case QEMU_OPTION_m:
             ram_size = atol(optarg) * 1024 * 1024;
+            ram_size = (uint64_t)atol(optarg) * 1024 * 1024;
             if (ram_size <= 0)
                 help();
 #ifndef CONFIG_DM
@@ -6400,50 +6487,41 @@ int main(int argc, char **argv)
     shared_page_nr = nr_pages - 1;
 #endif
 
-    page_array = (xen_pfn_t *)malloc(tmp_nr_pages * sizeof(xen_pfn_t));
-    if (page_array == NULL) {
-        fprintf(logfile, "malloc returned error %d\n", errno);
-        exit(-1);
-    }
-
 #if defined(__i386__) || defined(__x86_64__)
-    for ( i = 0; i < tmp_nr_pages; i++)
-        page_array[i] = i;
-
-    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
-                                         PROT_READ|PROT_WRITE, page_array,
-                                         tmp_nr_pages);
-    if (phys_ram_base == NULL) {
-        fprintf(logfile, "batch map guest memory returned error %d\n", errno);
+
+    if ( qemu_map_cache_init(tmp_nr_pages) )
+    {
+        fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
         exit(-1);
     }
 
     shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       page_array[shared_page_nr]);
+                                       PROT_READ|PROT_WRITE, shared_page_nr);
     if (shared_page == NULL) {
         fprintf(logfile, "map shared IO page returned error %d\n", errno);
         exit(-1);
     }
 
-    fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n",
-            shared_page_nr, (uint64_t)(page_array[shared_page_nr]));
+    fprintf(logfile, "shared page at pfn:%lx\n", shared_page_nr);
 
     buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                             PROT_READ|PROT_WRITE,
-                                            page_array[shared_page_nr - 2]);
+                                            shared_page_nr - 2);
     if (buffered_io_page == NULL) {
         fprintf(logfile, "map buffered IO page returned error %d\n", errno);
         exit(-1);
     }
 
-    fprintf(logfile, "buffered io page at pfn:%lx, mfn: %"PRIx64"\n",
-            shared_page_nr - 2, (uint64_t)(page_array[shared_page_nr - 2]));
-
-    free(page_array);
+    fprintf(logfile, "buffered io page at pfn:%lx\n", shared_page_nr - 2);
 
 #elif defined(__ia64__)
-
+
+    page_array = (xen_pfn_t *)malloc(tmp_nr_pages * sizeof(xen_pfn_t));
+    if (page_array == NULL) {
+        fprintf(logfile, "malloc returned error %d\n", errno);
+        exit(-1);
+    }
+
     if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
                              IO_PAGE_START >> PAGE_SHIFT, 3) != 3) {
         fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
```
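`qemu_map_cache()` splits each guest-physical address into a bucket index and an offset within the bucket, and keeps a one-entry static fast path for repeated hits on the same bucket. The arithmetic is easiest to see in isolation; a standalone sketch using the x86_64 constants from vl.h:

```c
#include <stdint.h>
#include <stdio.h>

#define MCACHE_BUCKET_SHIFT 20                       /* x86_64 value from vl.h */
#define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)

int main(void)
{
    uint64_t phys_addr = 0x12345678ULL;              /* arbitrary guest address */
    unsigned long index  = phys_addr >> MCACHE_BUCKET_SHIFT;     /* which bucket  */
    unsigned long offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); /* inside bucket */

    /* 0x12345678 >> 20 = bucket 0x123, offset 0x45678 */
    printf("bucket 0x%lx, offset 0x%lx\n", index, offset);
    return 0;
}
```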
```diff
--- a/tools/ioemu/vl.h	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/ioemu/vl.h	Thu Dec 07 11:44:05 2006 +0000
@@ -156,6 +156,26 @@ extern void *shared_vram;
 
 extern FILE *logfile;
 
+
+#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__)
+#define MAX_MCACHE_SIZE     0x40000000   /* 1GB max for x86 */
+#define MCACHE_BUCKET_SHIFT 16
+#elif defined(__x86_64__)
+#define MAX_MCACHE_SIZE     0x1000000000 /* 64GB max for x86_64 */
+#define MCACHE_BUCKET_SHIFT 20
+#endif
+
+#define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)
+
+struct map_cache {
+    unsigned long paddr_index;
+    uint8_t *vaddr_base;
+};
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr);
+#endif
+
 extern int xc_handle;
 extern int domid;
 
```
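Worked numbers for the limits above: 64kB buckets cap i386 at 1GB / 64kB = 16384 cached buckets, while 1MB buckets cap x86_64 at 64GB / 1MB = 65536. A throwaway check (not code from the tree):

```c
#include <stdio.h>

int main(void)
{
    unsigned long      i386_buckets   = 0x40000000UL   >> 16; /* 16384 */
    unsigned long long x86_64_buckets = 0x1000000000ULL >> 20; /* 65536 */

    printf("i386: %lu buckets, x86_64: %llu buckets\n",
           i386_buckets, x86_64_buckets);
    return 0;
}
```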
```diff
--- a/tools/libxc/xc_hvm_build.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c	Thu Dec 07 11:44:05 2006 +0000
@@ -285,7 +285,6 @@ static int xc_hvm_build_internal(int xc_
 
     if ( setup_guest(xc_handle, domid, memsize, image, image_size, &ctxt) < 0 )
     {
-        ERROR("Error constructing guest OS");
         goto error_out;
     }
 
@@ -329,26 +328,30 @@ static int parseelfimage(char *elfbase, 
 
     if ( !IS_ELF(*ehdr) )
     {
-        ERROR("Kernel image does not have an ELF header.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel image does not have an ELF header.");
         return -EINVAL;
     }
 
     if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
     {
-        ERROR("ELF program headers extend beyond end of image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF program headers extend beyond end of image.");
         return -EINVAL;
     }
 
     if ( (ehdr->e_shoff + (ehdr->e_shnum * ehdr->e_shentsize)) > elfsize )
     {
-        ERROR("ELF section headers extend beyond end of image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF section headers extend beyond end of image.");
         return -EINVAL;
     }
 
     /* Find the section-header strings table. */
     if ( ehdr->e_shstrndx == SHN_UNDEF )
     {
-        ERROR("ELF image has no section-header strings table (shstrtab).");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF image has no section-header strings table (shstrtab).");
         return -EINVAL;
     }
     shdr = (Elf32_Shdr *)(elfbase + ehdr->e_shoff +
@@ -370,7 +373,8 @@ static int parseelfimage(char *elfbase, 
          (ehdr->e_entry < kernstart) ||
          (ehdr->e_entry > kernend) )
     {
-        ERROR("Malformed ELF image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Malformed ELF image.");
        return -EINVAL;
     }
 
```
```diff
--- a/tools/libxc/xc_linux_build.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xc_linux_build.c	Thu Dec 07 11:44:05 2006 +0000
@@ -120,7 +120,7 @@ static int probeimageformat(const char *
     if ( probe_elf(image, image_size, load_funcs) &&
          probe_bin(image, image_size, load_funcs) )
     {
-        ERROR( "Unrecognized image format" );
+        xc_set_error(XC_INVALID_KERNEL, "Not a valid ELF or raw kernel image");
         return -EINVAL;
     }
 
@@ -618,17 +618,20 @@ static int compat_check(int xc_handle, s
     xen_capabilities_info_t xen_caps = "";
 
     if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
-        ERROR("Cannot determine host capabilities.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Cannot determine host capabilities.");
         return 0;
     }
 
     if (strstr(xen_caps, "xen-3.0-x86_32p")) {
         if (dsi->pae_kernel == PAEKERN_no) {
-            ERROR("Non PAE-kernel on PAE host.");
+            xc_set_error(XC_INVALID_KERNEL,
+                         "Non PAE-kernel on PAE host.");
             return 0;
         }
     } else if (dsi->pae_kernel != PAEKERN_no) {
-        ERROR("PAE-kernel on non-PAE host.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "PAE-kernel on non-PAE host.");
         return 0;
     }
 
@@ -1141,7 +1144,6 @@ static int xc_linux_build_internal(int x
                        console_evtchn, console_mfn,
                        features_bitmap) < 0 )
     {
-        ERROR("Error constructing guest OS");
         goto error_out;
     }
 
```
```diff
--- a/tools/libxc/xc_load_elf.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xc_load_elf.c	Thu Dec 07 11:44:05 2006 +0000
@@ -29,20 +29,46 @@ loadelfsymtab(
  */
 #if defined(__ia64__)
 #define ELFCLASS   ELFCLASS64
+#define ELFCLASS_DESC   "64-bit"
+
 #define ELFDATA    ELFDATA2LSB
+#define ELFDATA_DESC    "Little-Endian"
+
 #define ELFMACHINE EM_IA_64
+#define ELFMACHINE_DESC "ia64"
+
+
 #elif defined(__i386__)
 #define ELFCLASS   ELFCLASS32
+#define ELFCLASS_DESC   "32-bit"
+
 #define ELFDATA    ELFDATA2LSB
+#define ELFDATA_DESC    "Little-Endian"
+
 #define ELFMACHINE EM_386
+#define ELFMACHINE_DESC "i386"
+
+
 #elif defined(__x86_64__)
 #define ELFCLASS   ELFCLASS64
+#define ELFCLASS_DESC   "64-bit"
+
 #define ELFDATA    ELFDATA2LSB
+#define ELFDATA_DESC    "Little-Endian"
+
 #define ELFMACHINE EM_X86_64
+#define ELFMACHINE_DESC "x86_64"
+
+
 #elif defined(__powerpc__)
 #define ELFCLASS   ELFCLASS64
+#define ELFCLASS_DESC   "64-bit"
+
 #define ELFDATA    ELFDATA2MSB
+#define ELFDATA_DESC    "Big-Endian"
+
 #define ELFMACHINE EM_PPC64
+#define ELFMACHINE_DESC "ppc64"
 #endif
 
 int probe_elf(const char *image,
@@ -231,7 +257,8 @@ unsigned long long xen_elfnote_numeric(s
         *defined = 1;
         return *(uint64_t*)ELFNOTE_DESC(note);
     default:
-        ERROR("elfnotes: unknown data size %#x for numeric type note %#x\n",
+        xc_set_error(XC_INVALID_KERNEL,
+                     "elfnotes: unknown data size %#x for numeric type note %#x\n",
               note->descsz, type);
         return 0;
     }
@@ -250,35 +277,59 @@ static int parseelfimage(const char *ima
 
     if ( !IS_ELF(*ehdr) )
     {
-        ERROR("Kernel image does not have an ELF header.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel image does not have an ELF header.");
         return -EINVAL;
     }
 
-    if ( (ehdr->e_ident[EI_CLASS] != ELFCLASS) ||
-         (ehdr->e_machine != ELFMACHINE) ||
-         (ehdr->e_ident[EI_DATA] != ELFDATA) ||
-         (ehdr->e_type != ET_EXEC) )
+    if (ehdr->e_machine != ELFMACHINE)
+    {
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel ELF architecture '%d' does not match Xen architecture '%d' (%s)",
+                     ehdr->e_machine, ELFMACHINE, ELFMACHINE_DESC);
+        return -EINVAL;
+    }
+    if (ehdr->e_ident[EI_CLASS] != ELFCLASS)
     {
-        ERROR("Kernel not a Xen-compatible Elf image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel ELF wordsize '%d' does not match Xen wordsize '%d' (%s)",
+                     ehdr->e_ident[EI_CLASS], ELFCLASS, ELFCLASS_DESC);
+        return -EINVAL;
+    }
+    if (ehdr->e_ident[EI_DATA] != ELFDATA)
+    {
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel ELF endianness '%d' does not match Xen endianness '%d' (%s)",
+                     ehdr->e_ident[EI_DATA], ELFDATA, ELFDATA_DESC);
+        return -EINVAL;
+    }
+    if (ehdr->e_type != ET_EXEC)
+    {
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Kernel ELF type '%d' does not match Xen type '%d'",
+                     ehdr->e_type, ET_EXEC);
         return -EINVAL;
     }
 
     if ( (ehdr->e_phoff + (ehdr->e_phnum*ehdr->e_phentsize)) > image_len )
     {
-        ERROR("ELF program headers extend beyond end of image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF program headers extend beyond end of image.");
         return -EINVAL;
     }
 
     if ( (ehdr->e_shoff + (ehdr->e_shnum*ehdr->e_shentsize)) > image_len )
     {
-        ERROR("ELF section headers extend beyond end of image.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF section headers extend beyond end of image.");
         return -EINVAL;
     }
 
     /* Find the section-header strings table. */
     if ( ehdr->e_shstrndx == SHN_UNDEF )
     {
-        ERROR("ELF image has no section-header strings table (shstrtab).");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF image has no section-header strings table (shstrtab).");
         return -EINVAL;
     }
     shdr = (Elf_Shdr *)(image + ehdr->e_shoff +
@@ -325,22 +376,25 @@ static int parseelfimage(const char *ima
         if ( ( loader == NULL || strncmp(loader, "generic", 7) ) &&
              ( guest_os == NULL || strncmp(guest_os, "linux", 5) ) )
         {
-            ERROR("Will only load images built for the generic loader "
-                  "or Linux images");
+            xc_set_error(XC_INVALID_KERNEL,
+                         "Will only load images built for the generic loader "
+                         "or Linux images");
             return -EINVAL;
         }
 
         if ( xen_version == NULL || strncmp(xen_version, "xen-3.0", 7) )
         {
-            ERROR("Will only load images built for Xen v3.0");
+            xc_set_error(XC_INVALID_KERNEL,
+                         "Will only load images built for Xen v3.0");
             return -EINVAL;
         }
     }
     else
     {
 #if defined(__x86_64__) || defined(__i386__)
-        ERROR("Not a Xen-ELF image: "
-              "No ELF notes or '__xen_guest' section found.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Not a Xen-ELF image: "
+                     "No ELF notes or '__xen_guest' section found.");
         return -EINVAL;
 #endif
     }
@@ -396,8 +450,9 @@ static int parseelfimage(const char *ima
 
     if ( elf_pa_off_defined && !virt_base_defined )
     {
-        ERROR("Neither ELF_PADDR_OFFSET nor VIRT_BASE found in ELF "
-              " notes or __xen_guest section.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "Neither ELF_PADDR_OFFSET nor VIRT_BASE found in ELF "
+                     " notes or __xen_guest section.");
         return -EINVAL;
     }
 
@@ -409,7 +464,8 @@ static int parseelfimage(const char *ima
         vaddr = phdr->p_paddr - dsi->elf_paddr_offset + dsi->v_start;
         if ( (vaddr + phdr->p_memsz) < vaddr )
         {
-            ERROR("ELF program header %d is too large.", h);
+            xc_set_error(XC_INVALID_KERNEL,
+                         "ELF program header %d is too large.", h);
             return -EINVAL;
         }
 
@@ -431,7 +487,8 @@ static int parseelfimage(const char *ima
          (dsi->v_kernentry > kernend) ||
          (dsi->v_start > kernstart) )
     {
-        ERROR("ELF start or entries are out of bounds.");
+        xc_set_error(XC_INVALID_KERNEL,
+                     "ELF start or entries are out of bounds.");
         return -EINVAL;
     }
 
```
```diff
--- a/tools/libxc/xc_private.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xc_private.c	Thu Dec 07 11:44:05 2006 +0000
@@ -8,6 +8,82 @@
 #include "xc_private.h"
 #include "xg_private.h"
 
+#include <stdarg.h>
+
+static __thread xc_error last_error = { XC_ERROR_NONE, ""};
+#if DEBUG
+static xc_error_handler error_handler = xc_default_error_handler;
+#else
+static xc_error_handler error_handler = NULL;
+#endif
+
+void xc_default_error_handler(const xc_error const *err)
+{
+    const char *desc = xc_error_code_to_desc(err->code);
+    fprintf(stderr, "ERROR %s: %s\n", desc, err->message);
+}
+
+const xc_error const *xc_get_last_error(void)
+{
+    return &last_error;
+}
+
+void xc_clear_last_error(void)
+{
+    last_error.code = XC_ERROR_NONE;
+    last_error.message[0] = '\0';
+}
+
+const char *xc_error_code_to_desc(int code)
+{
+    /* Sync to members of xc_error_code enumeration in xenctrl.h */
+    switch ( code )
+    {
+    case XC_ERROR_NONE:
+        return "No error details";
+    case XC_INTERNAL_ERROR:
+        return "Internal error";
+    case XC_INVALID_KERNEL:
+        return "Invalid kernel";
+    }
+
+    return "Unknown error code";
+}
+
+xc_error_handler xc_set_error_handler(xc_error_handler handler)
+{
+    xc_error_handler old = error_handler;
+    error_handler = handler;
+    return old;
+}
+
+
+static void _xc_set_error(int code, const char *msg)
+{
+    last_error.code = code;
+    strncpy(last_error.message, msg, XC_MAX_ERROR_MSG_LEN - 1);
+    last_error.message[XC_MAX_ERROR_MSG_LEN-1] = '\0';
+}
+
+void xc_set_error(int code, const char *fmt, ...)
+{
+    int saved_errno = errno;
+    char msg[XC_MAX_ERROR_MSG_LEN];
+    va_list args;
+
+    va_start(args, fmt);
+    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
+    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
+    va_end(args);
+
+    _xc_set_error(code, msg);
+
+    errno = saved_errno;
+
+    if ( error_handler != NULL )
+        error_handler(&last_error);
+}
+
 int lock_pages(void *addr, size_t len)
 {
     int e = 0;
```
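For callers, the visible effect of this plumbing is a per-thread error record they can poll instead of scraping stderr. A minimal consumer sketch against the API this changeset adds to xenctrl.h:

```c
#include <stdio.h>
#include <xenctrl.h>

/* Minimal sketch: report and reset the calling thread's last libxc error. */
static void report_xc_failure(void)
{
    const xc_error *err = xc_get_last_error();

    if (err->code != XC_ERROR_NONE)
        fprintf(stderr, "libxc failed: %s: %s\n",
                xc_error_code_to_desc(err->code), err->message);

    xc_clear_last_error();
}
```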
```diff
--- a/tools/libxc/xc_private.h	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xc_private.h	Thu Dec 07 11:44:05 2006 +0000
@@ -59,24 +59,15 @@
 #define PPRINTF(_f, _a...)
 #endif
 
-#define ERROR(_m, _a...)                \
-do {                                    \
-    int __saved_errno = errno;          \
-    DPRINTF("ERROR: " _m "\n" , ## _a );\
-    errno = __saved_errno;              \
-} while (0)
+void xc_set_error(int code, const char *fmt, ...);
+
+#define ERROR(_m, _a...)  xc_set_error(XC_INTERNAL_ERROR, _m , ## _a )
+#define PERROR(_m, _a...) xc_set_error(XC_INTERNAL_ERROR, _m " (%d = %s)", \
+                          _m , ## _a , errno, strerror(errno))
 
 int lock_pages(void *addr, size_t len);
 void unlock_pages(void *addr, size_t len);
 
-#define PERROR(_m, _a...)                                 \
-do {                                                      \
-    int __saved_errno = errno;                            \
-    DPRINTF("ERROR: " _m " (%d = %s)\n" , ## _a ,         \
-            __saved_errno, strerror(__saved_errno));      \
-    errno = __saved_errno;                                \
-} while (0)
-
 static inline void safe_munlock(const void *addr, size_t len)
 {
     int saved_errno = errno;
```
```diff
--- a/tools/libxc/xenctrl.h	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/libxc/xenctrl.h	Thu Dec 07 11:44:05 2006 +0000
@@ -682,4 +682,46 @@ int xc_hvm_set_isa_irq_level(
 int xc_hvm_set_pci_link_route(
     int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
 
+
+typedef enum {
+    XC_ERROR_NONE = 0,
+    XC_INTERNAL_ERROR = 1,
+    XC_INVALID_KERNEL = 2,
+} xc_error_code;
+
+#define XC_MAX_ERROR_MSG_LEN 1024
+typedef struct {
+    int code;
+    char message[XC_MAX_ERROR_MSG_LEN];
+} xc_error;
+
+/*
+ * Return a pointer to the last error. This pointer and the
+ * data pointed to are only valid until the next call to
+ * libxc.
+ */
+const xc_error const *xc_get_last_error(void);
+
+/*
+ * Clear the last error
+ */
+void xc_clear_last_error(void);
+
+typedef void (*xc_error_handler)(const xc_error const* err);
+
+/*
+ * The default error handler which prints to stderr
+ */
+void xc_default_error_handler(const xc_error const* err);
+
+/*
+ * Convert an error code into a text description
+ */
+const char *xc_error_code_to_desc(int code);
+
+/*
+ * Registers a callback to handle errors
+ */
+xc_error_handler xc_set_error_handler(xc_error_handler handler);
+
 #endif
```
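Applications that prefer pushing over polling can register a callback through `xc_set_error_handler()`; the default handler (compiled in when DEBUG is set) just prints to stderr. A sketch of diverting errors into an application log (`my_log()` is a hypothetical stand-in for whatever logger the application uses):

```c
#include <xenctrl.h>

extern void my_log(const char *fmt, ...);   /* hypothetical app logger */

static void log_xc_error(const xc_error *err)
{
    my_log("xc error %d (%s): %s",
           err->code, xc_error_code_to_desc(err->code), err->message);
}

static void install_handler(void)
{
    /* The previous handler is returned, so it can be restored later. */
    xc_error_handler old = xc_set_error_handler(log_xc_error);
    (void)old;
}
```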
```diff
--- a/tools/python/xen/lowlevel/xc/xc.c	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Dec 07 11:44:05 2006 +0000
@@ -29,7 +29,7 @@
 #define PKG "xen.lowlevel.xc"
 #define CLS "xc"
 
-static PyObject *xc_error, *zero;
+static PyObject *xc_error_obj, *zero;
 
 typedef struct {
     PyObject_HEAD;
@@ -40,6 +40,23 @@ typedef struct {
 static PyObject *dom_op(XcObject *self, PyObject *args,
                         int (*fn)(int, uint32_t));
 
+static PyObject *pyxc_error_to_exception(void)
+{
+    PyObject *pyerr;
+    const xc_error const *err = xc_get_last_error();
+    const char *desc = xc_error_code_to_desc(err->code);
+
+    if (err->message[1])
+        pyerr = Py_BuildValue("(iss)", err->code, desc, err->message);
+    else
+        pyerr = Py_BuildValue("(is)", err->code, desc);
+
+    xc_clear_last_error();
+
+    PyErr_SetObject(xc_error_obj, pyerr);
+
+    return NULL;
+}
 
 static PyObject *pyxc_domain_dumpcore(XcObject *self, PyObject *args)
 {
@@ -53,7 +70,7 @@ static PyObject *pyxc_domain_dumpcore(Xc
         return NULL;
 
     if (xc_domain_dumpcore(self->xc_handle, dom, corefile) != 0)
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -101,13 +118,13 @@ static PyObject *pyxc_domain_create(XcOb
 
     if ( (ret = xc_domain_create(self->xc_handle, ssidref,
                                  handle, flags, &dom)) < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return PyInt_FromLong(dom);
 
 out_exception:
     errno = EINVAL;
-    PyErr_SetFromErrno(xc_error);
+    PyErr_SetFromErrno(xc_error_obj);
     return NULL;
 }
 
@@ -119,7 +136,7 @@ static PyObject *pyxc_domain_max_vcpus(X
         return NULL;
 
     if (xc_domain_max_vcpus(self->xc_handle, dom, max) != 0)
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -164,7 +181,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     }
 
     if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -184,7 +201,7 @@ static PyObject *pyxc_domain_setcpuweigh
         return NULL;
 
     if ( xc_domain_setcpuweight(self->xc_handle, dom, cpuweight) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -215,14 +232,13 @@ static PyObject *pyxc_domain_sethandle(X
     }
 
     if (xc_domain_sethandle(self->xc_handle, dom, handle) < 0)
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
 
 out_exception:
-    errno = EINVAL;
-    PyErr_SetFromErrno(xc_error);
+    PyErr_SetFromErrno(xc_error_obj);
     return NULL;
 }
 
@@ -251,7 +267,7 @@ static PyObject *pyxc_domain_getinfo(XcO
     if (nr_doms < 0)
     {
         free(info);
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
     }
 
     list = PyList_New(nr_doms);
@@ -306,10 +322,10 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
     rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
     if ( rc < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
                               "online", info.online,
@@ -360,9 +376,7 @@ static PyObject *pyxc_linux_build(XcObje
                        ramdisk, cmdline, features, flags,
                        store_evtchn, &store_mfn,
                        console_evtchn, &console_mfn) != 0 ) {
-        if (!errno)
-            errno = EINVAL;
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
     }
     return Py_BuildValue("{s:i,s:i}",
                          "store_mfn", store_mfn,
@@ -389,14 +403,14 @@ static PyObject *pyxc_hvm_build(XcObject
         return NULL;
 
     if ( xc_hvm_build(self->xc_handle, dom, memsize, image) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     /* Set up the HVM info table. */
     va_map = xc_map_foreign_range(self->xc_handle, dom, XC_PAGE_SIZE,
                                   PROT_READ | PROT_WRITE,
                                   HVM_INFO_PFN);
     if ( va_map == NULL )
-        return PyErr_SetFromErrno(xc_error);
+        return PyErr_SetFromErrno(xc_error_obj);
     va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
     memset(va_hvm, 0, sizeof(*va_hvm));
     strncpy(va_hvm->signature, "HVM INFO", 8);
@@ -431,7 +445,7 @@ static PyObject *pyxc_evtchn_alloc_unbou
         return NULL;
 
     if ( (port = xc_evtchn_alloc_unbound(self->xc_handle, dom, remote_dom)) < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return PyInt_FromLong(port);
 }
@@ -452,7 +466,7 @@ static PyObject *pyxc_physdev_pci_access
     ret = xc_physdev_pci_access_modify(
         self->xc_handle, dom, bus, dev, func, enable);
     if ( ret != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -474,7 +488,7 @@ static PyObject *pyxc_readconsolering(Xc
 
     ret = xc_readconsolering(self->xc_handle, &str, &count, clear);
     if ( ret < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return PyString_FromStringAndSize(str, count);
 }
@@ -504,7 +518,7 @@ static PyObject *pyxc_physinfo(XcObject 
     int i;
 
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     *q=0;
     for(i=0;i<sizeof(info.hw_cap)/4;i++)
@@ -542,25 +556,25 @@ static PyObject *pyxc_xeninfo(XcObject *
     xen_version = xc_version(self->xc_handle, XENVER_version, NULL);
 
     if ( xc_version(self->xc_handle, XENVER_extraversion, &xen_extra) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     if ( xc_version(self->xc_handle, XENVER_compile_info, &xen_cc) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     if ( xc_version(self->xc_handle, XENVER_changeset, &xen_chgset) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     if ( xc_version(self->xc_handle, XENVER_capabilities, &xen_caps) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     if ( xc_version(self->xc_handle, XENVER_platform_parameters, &p_parms) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     sprintf(str, "virt_start=0x%lx", p_parms.virt_start);
 
     xen_pagesize = xc_version(self->xc_handle, XENVER_pagesize, NULL);
     if (xen_pagesize < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return Py_BuildValue("{s:i,s:i,s:s,s:s,s:i,s:s,s:s,s:s,s:s,s:s,s:s}",
                          "xen_major", xen_version >> 16,
@@ -593,7 +607,7 @@ static PyObject *pyxc_sedf_domain_set(Xc
         return NULL;
     if ( xc_sedf_domain_set(self->xc_handle, domid, period,
                             slice, latency, extratime,weight) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -610,7 +624,7 @@ static PyObject *pyxc_sedf_domain_get(Xc
 
     if (xc_sedf_domain_get(self->xc_handle, domid, &period,
                            &slice,&latency,&extratime,&weight))
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return Py_BuildValue("{s:i,s:L,s:L,s:L,s:i,s:i}",
                          "domid", domid,
@@ -638,7 +652,7 @@ static PyObject *pyxc_shadow_control(PyO
 
     if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, NULL, 0, NULL) 
          < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -668,7 +682,7 @@ static PyObject *pyxc_shadow_mem_control
         op = XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION;
     }
     if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, &mb, 0, NULL) < 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     mbarg = mb;
     return Py_BuildValue("i", mbarg);
@@ -678,7 +692,7 @@ static PyObject *pyxc_sched_id_get(XcObj
 
     int sched_id;
     if (xc_sched_id(self->xc_handle, &sched_id) != 0)
-        return PyErr_SetFromErrno(xc_error);
+        return PyErr_SetFromErrno(xc_error_obj);
 
     return Py_BuildValue("i", sched_id);
 }
@@ -704,7 +718,7 @@ static PyObject *pyxc_sched_credit_domai
     sdom.cap = cap;
 
     if ( xc_sched_credit_domain_set(self->xc_handle, domid, &sdom) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -719,7 +733,7 @@ static PyObject *pyxc_sched_credit_domai
         return NULL;
 
     if ( xc_sched_credit_domain_get(self->xc_handle, domid, &sdom) != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     return Py_BuildValue("{s:H,s:H}",
                          "weight", sdom.weight,
@@ -735,7 +749,7 @@ static PyObject *pyxc_domain_setmaxmem(X
         return NULL;
 
     if (xc_domain_setmaxmem(self->xc_handle, dom, maxmem_kb) != 0)
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -762,7 +776,7 @@ static PyObject *pyxc_domain_memory_incr
     if ( xc_domain_memory_increase_reservation(self->xc_handle, dom,
                                                nr_extents, extent_order,
                                                address_bits, NULL) )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -784,7 +798,7 @@ static PyObject *pyxc_domain_ioport_perm
     ret = xc_domain_ioport_permission(
         self->xc_handle, dom, first_port, nr_ports, allow_access);
     if ( ret != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -807,7 +821,7 @@ static PyObject *pyxc_domain_irq_permiss
     ret = xc_domain_irq_permission(
         xc->xc_handle, dom, pirq, allow_access);
     if ( ret != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -830,7 +844,7 @@ static PyObject *pyxc_domain_iomem_permi
     ret = xc_domain_iomem_permission(
         xc->xc_handle, dom, first_pfn, nr_pfns, allow_access);
     if ( ret != 0 )
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -870,7 +884,7 @@ static PyObject *dom_op(XcObject *self, 
         return NULL;
 
     if (fn(self->xc_handle, dom) != 0)
-        return PyErr_SetFromErrno(xc_error);
+        return pyxc_error_to_exception();
 
     Py_INCREF(zero);
     return zero;
@@ -1199,7 +1213,7 @@ static int
 PyXc_init(XcObject *self, PyObject *args, PyObject *kwds)
 {
     if ((self->xc_handle = xc_interface_open()) == -1) {
-        PyErr_SetFromErrno(xc_error);
+        pyxc_error_to_exception();
        return -1;
     }
 
@@ -1272,7 +1286,7 @@ PyMODINIT_FUNC initxc(void)
     if (m == NULL)
         return;
 
-    xc_error = PyErr_NewException(PKG ".Error", PyExc_RuntimeError, NULL);
+    xc_error_obj = PyErr_NewException(PKG ".Error", PyExc_RuntimeError, NULL);
     zero = PyInt_FromLong(0);
 
     /* KAF: This ensures that we get debug output in a timely manner. */
@@ -1282,8 +1296,8 @@ PyMODINIT_FUNC initxc(void)
     Py_INCREF(&PyXcType);
     PyModule_AddObject(m, CLS, (PyObject *)&PyXcType);
 
-    Py_INCREF(xc_error);
-    PyModule_AddObject(m, "Error", xc_error);
+    Py_INCREF(xc_error_obj);
+    PyModule_AddObject(m, "Error", xc_error_obj);
 
     /* Expose some libxc constants to Python */
     PyModule_AddIntConstant(m, "XEN_SCHEDULER_SEDF", XEN_SCHEDULER_SEDF);
```
```diff
--- a/tools/python/xen/xend/server/blkif.py	Thu Dec 07 11:41:38 2006 +0000
+++ b/tools/python/xen/xend/server/blkif.py	Thu Dec 07 11:44:05 2006 +0000
@@ -124,6 +124,8 @@ class BlkifController(DevController):
             config['dev'] = dev
         if typ and params:
             config['uname'] = typ +':' + params
+        else:
+            config['uname'] = None
         if mode:
             config['mode'] = mode
         if uuid:
```
```diff
--- a/xen/arch/x86/boot/x86_32.S	Thu Dec 07 11:41:38 2006 +0000
+++ b/xen/arch/x86/boot/x86_32.S	Thu Dec 07 11:44:05 2006 +0000
@@ -1,4 +1,5 @@
 #include <xen/config.h>
+#include <xen/multiboot.h>
 #include <public/xen.h>
 #include <asm/asm_defns.h>
 #include <asm/desc.h>
@@ -17,12 +18,14 @@ ENTRY(_stext)
         .align 4
 
 /*** MULTIBOOT HEADER ****/
+#define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_HEADER_MODS_ALIGNED | \
+                                MULTIBOOT_HEADER_WANT_MEMORY)
         /* Magic number indicating a Multiboot header. */
-        .long 0x1BADB002
+        .long MULTIBOOT_HEADER_MAGIC
        /* Flags to bootloader (see Multiboot spec). */
-        .long 0x00000003
+        .long MULTIBOOT_HEADER_FLAGS
        /* Checksum: must be the negated sum of the first two fields. */
-        .long -0x1BADB005
+        .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
 
 not_multiboot_msg:
         .asciz "ERR: Not a Multiboot bootloader!"
```
```diff
--- a/xen/arch/x86/boot/x86_64.S	Thu Dec 07 11:41:38 2006 +0000
+++ b/xen/arch/x86/boot/x86_64.S	Thu Dec 07 11:44:05 2006 +0000
@@ -1,4 +1,5 @@
 #include <xen/config.h>
+#include <xen/multiboot.h>
 #include <public/xen.h>
 #include <asm/asm_defns.h>
 #include <asm/desc.h>
@@ -19,12 +20,14 @@ ENTRY(_stext)
 
         .org 0x004
 /*** MULTIBOOT HEADER ****/
+#define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_HEADER_MODS_ALIGNED | \
+                                MULTIBOOT_HEADER_WANT_MEMORY)
        /* Magic number indicating a Multiboot header. */
-        .long 0x1BADB002
+        .long MULTIBOOT_HEADER_MAGIC
        /* Flags to bootloader (see Multiboot spec). */
-        .long 0x00000003
+        .long MULTIBOOT_HEADER_FLAGS
        /* Checksum: must be the negated sum of the first two fields. */
-        .long -0x1BADB005
+        .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
 
 .Lbad_cpu_msg: .asciz "ERR: Not a 64-bit CPU!"
 .Lbad_ldr_msg: .asciz "ERR: Not a Multiboot bootloader!"
```
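Both boot files encode the same Multiboot rule: the three header words must sum to zero, which is why the old literals 0x1BADB002, 0x00000003 and -0x1BADB005 worked. A quick standalone check that the symbolic form preserves the invariant:

```c
#include <assert.h>
#include <stdint.h>

#define MULTIBOOT_HEADER_MAGIC        0x1BADB002u
#define MULTIBOOT_HEADER_MODS_ALIGNED 0x00000001u
#define MULTIBOOT_HEADER_WANT_MEMORY  0x00000002u
#define MULTIBOOT_HEADER_FLAGS \
    (MULTIBOOT_HEADER_MODS_ALIGNED | MULTIBOOT_HEADER_WANT_MEMORY)

int main(void)
{
    uint32_t magic    = MULTIBOOT_HEADER_MAGIC;  /* was the literal 0x1BADB002 */
    uint32_t flags    = MULTIBOOT_HEADER_FLAGS;  /* == 0x00000003, as before   */
    uint32_t checksum = -(magic + flags);        /* == -0x1BADB005             */

    assert(magic + flags + checksum == 0);       /* Multiboot header invariant */
    return 0;
}
```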
```diff
--- a/xen/include/xen/multiboot.h	Thu Dec 07 11:41:38 2006 +0000
+++ b/xen/include/xen/multiboot.h	Thu Dec 07 11:44:05 2006 +0000
@@ -18,16 +18,30 @@
 #ifndef __MULTIBOOT_H__
 #define __MULTIBOOT_H__
 
+
+/*
+ * Multiboot header structure.
+ */
+#define MULTIBOOT_HEADER_MAGIC         0x1BADB002
+#define MULTIBOOT_HEADER_MODS_ALIGNED  0x00000001
+#define MULTIBOOT_HEADER_WANT_MEMORY   0x00000002
+#define MULTIBOOT_HEADER_HAS_VBE       0x00000004
+#define MULTIBOOT_HEADER_HAS_ADDR      0x00010000
+
 /* The magic number passed by a Multiboot-compliant boot loader. */
-#define MULTIBOOT_BOOTLOADER_MAGIC 0x2BADB002
+#define MULTIBOOT_BOOTLOADER_MAGIC     0x2BADB002
 
 #define MBI_MEMLIMITS  (1<<0)
 #define MBI_DRIVES     (1<<1)
 #define MBI_CMDLINE    (1<<2)
 #define MBI_MODULES    (1<<3)
+#define MBI_AOUT_SYMS  (1<<4)
+#define MBI_ELF_SYMS   (1<<5)
 #define MBI_MEMMAP     (1<<6)
 #define MBI_LOADERNAME (1<<9)
 
+#ifndef __ASSEMBLY__
+
 /* The symbol table for a.out. */
 typedef struct {
     u32 tabsize;
@@ -47,16 +61,28 @@ typedef struct {
 /* The Multiboot information. */
 typedef struct {
     u32 flags;
+
+    /* Valid if flags sets MBI_MEMLIMITS */
     u32 mem_lower;
     u32 mem_upper;
+
+    /* Valid if flags sets MBI_DRIVES */
     u32 boot_device;
+
+    /* Valid if flags sets MBI_CMDLINE */
     u32 cmdline;
+
+    /* Valid if flags sets MBI_MODULES */
     u32 mods_count;
     u32 mods_addr;
+
+    /* Valid if flags sets ... */
     union {
-        aout_symbol_table_t aout_sym;
-        elf_section_header_table_t elf_sec;
+        aout_symbol_table_t aout_sym;       /* ... MBI_AOUT_SYMS */
+        elf_section_header_table_t elf_sec; /* ... MBI_ELF_SYMS */
     } u;
+
+    /* Valid if flags sets MBI_MEMMAP */
     u32 mmap_length;
     u32 mmap_addr;
 } multiboot_info_t;
@@ -80,4 +106,7 @@ typedef struct {
     u32 type;
 } memory_map_t;
 
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __MULTIBOOT_H__ */
```
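The new "Valid if flags sets ..." comments spell out the Multiboot contract: a field may only be read after testing its MBI_* bit. A consumer sketch using only the fields shown in this header (the printk lines are illustrative, not code from the tree):

```c
#include <xen/lib.h>        /* printk */
#include <xen/multiboot.h>

/* Sketch of a flags-checked consumer of multiboot_info_t. */
static void dump_mbi(const multiboot_info_t *mbi)
{
    if (mbi->flags & MBI_MEMLIMITS)
        printk("mem: %u kB lower, %u kB upper\n",
               mbi->mem_lower, mbi->mem_upper);

    if (mbi->flags & MBI_CMDLINE)
        printk("cmdline at %#x\n", mbi->cmdline);

    if (mbi->flags & MBI_MODULES)
        printk("%u modules at %#x\n", mbi->mods_count, mbi->mods_addr);

    if (mbi->flags & MBI_MEMMAP)
        printk("memory map: %u bytes at %#x\n",
               mbi->mmap_length, mbi->mmap_addr);
}
```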