ia64/xen-unstable

view xen/arch/x86/hvm/vmx/realmode.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Stop using MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 6595393a3d28
children
line source
1 /******************************************************************************
2 * arch/x86/hvm/vmx/realmode.c
3 *
4 * Real-mode emulation for VMX.
5 *
6 * Copyright (c) 2007-2008 Citrix Systems, Inc.
7 *
8 * Authors:
9 * Keir Fraser <keir.fraser@citrix.com>
10 */
12 #include <xen/config.h>
13 #include <xen/init.h>
14 #include <xen/lib.h>
15 #include <xen/sched.h>
16 #include <xen/paging.h>
17 #include <asm/event.h>
18 #include <asm/hvm/emulate.h>
19 #include <asm/hvm/hvm.h>
20 #include <asm/hvm/support.h>
21 #include <asm/hvm/vmx/vmx.h>
22 #include <asm/hvm/vmx/vmcs.h>
/*
 * Deliver an exception/interrupt to a real-mode guest by emulating the
 * CPU's IVT-based delivery: push FLAGS/CS/IP on the guest stack and load
 * CS:IP from the 4-byte IVT entry at idtr->base + vector*4.
 *
 * vector:   interrupt/exception vector to deliver.
 * insn_len: length of the delivering instruction; non-zero only for
 *           software interrupts (INT n), added to EIP in the return frame.
 * hvmemul_ctxt: emulation context holding guest registers/segment cache.
 */
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    /* We will rewrite CS below; mark it dirty so it gets written back. */
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);

 again:
    /* Each real-mode IVT entry is 4 bytes (IP:CS). */
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /*
         * Vector beyond the IDT limit. Escalate following real-hardware
         * behaviour: SW int -> #GP; #GP -> #DF; #DF -> triple fault.
         */
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    /* Fetch the new CS:IP from the guest's IVT. */
    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    /* Return frame: IP (past the INT n insn for SW ints), CS, FLAGS. */
    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
    {
        /* 32-bit stack: decrement full ESP. */
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        /* 16-bit stack: only SP's low 16 bits wrap/decrement. */
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    /* Real-mode linear address = SS.base + (new) SP. */
    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    /* Jump to the handler: CS = high word, IP = low word of the IVT entry. */
    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}
103 static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
104 {
105 struct vcpu *curr = current;
106 uint32_t intr_info;
107 int rc;
109 perfc_incr(realmode_emulations);
111 rc = hvm_emulate_one(hvmemul_ctxt);
113 if ( rc == X86EMUL_UNHANDLEABLE )
114 {
115 gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
116 goto fail;
117 }
119 if ( rc == X86EMUL_EXCEPTION )
120 {
121 if ( !hvmemul_ctxt->exn_pending )
122 {
123 intr_info = __vmread(VM_ENTRY_INTR_INFO);
124 __vmwrite(VM_ENTRY_INTR_INFO, 0);
125 if ( !(intr_info & INTR_INFO_VALID_MASK) )
126 {
127 gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
128 goto fail;
129 }
130 hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
131 hvmemul_ctxt->exn_insn_len = 0;
132 }
134 if ( unlikely(curr->domain->debugger_attached) &&
135 ((hvmemul_ctxt->exn_vector == TRAP_debug) ||
136 (hvmemul_ctxt->exn_vector == TRAP_int3)) )
137 {
138 domain_pause_for_debugger();
139 }
140 else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
141 {
142 gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
143 hvmemul_ctxt->exn_vector);
144 goto fail;
145 }
146 else
147 {
148 realmode_deliver_exception(
149 hvmemul_ctxt->exn_vector,
150 hvmemul_ctxt->exn_insn_len,
151 hvmemul_ctxt);
152 }
153 }
155 return;
157 fail:
158 gdprintk(XENLOG_ERR,
159 "Real-mode emulation failed @ %04x:%08lx: "
160 "%02x %02x %02x %02x %02x %02x\n",
161 hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
162 hvmemul_ctxt->insn_buf_eip,
163 hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
164 hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
165 hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
166 domain_crash(curr->domain);
167 }
169 void vmx_realmode(struct cpu_user_regs *regs)
170 {
171 struct vcpu *curr = current;
172 struct hvm_emulate_ctxt hvmemul_ctxt;
173 struct segment_register *sreg;
174 unsigned long intr_info;
175 unsigned int emulations = 0;
177 /* Get-and-clear VM_ENTRY_INTR_INFO. */
178 intr_info = __vmread(VM_ENTRY_INTR_INFO);
179 if ( intr_info & INTR_INFO_VALID_MASK )
180 __vmwrite(VM_ENTRY_INTR_INFO, 0);
182 hvm_emulate_prepare(&hvmemul_ctxt, regs);
184 if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
185 realmode_emulate_one(&hvmemul_ctxt);
187 /* Only deliver interrupts into emulated real mode. */
188 if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
189 (intr_info & INTR_INFO_VALID_MASK) )
190 {
191 realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
192 intr_info = 0;
193 }
195 curr->arch.hvm_vmx.vmx_emulate = 1;
196 while ( curr->arch.hvm_vmx.vmx_emulate &&
197 !softirq_pending(smp_processor_id()) &&
198 (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
199 {
200 /*
201 * Check for pending interrupts only every 16 instructions, because
202 * hvm_local_events_need_delivery() is moderately expensive, and only
203 * in real mode, because we don't emulate protected-mode IDT vectoring.
204 */
205 if ( unlikely(!(++emulations & 15)) &&
206 curr->arch.hvm_vmx.vmx_realmode &&
207 hvm_local_events_need_delivery(curr) )
208 break;
210 realmode_emulate_one(&hvmemul_ctxt);
212 /* Stop emulating unless our segment state is not safe */
213 if ( curr->arch.hvm_vmx.vmx_realmode )
214 curr->arch.hvm_vmx.vmx_emulate =
215 (curr->arch.hvm_vmx.vm86_segment_mask != 0);
216 else
217 curr->arch.hvm_vmx.vmx_emulate =
218 ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
219 || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
220 }
222 /* Need to emulate next time if we've started an IO operation */
223 if ( curr->arch.hvm_vcpu.io_state != HVMIO_none )
224 curr->arch.hvm_vmx.vmx_emulate = 1;
226 if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
227 {
228 /*
229 * Cannot enter protected mode with bogus selector RPLs and DPLs.
230 * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
231 * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
232 */
233 sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
234 sreg->attr.fields.dpl = sreg->sel & 3;
235 sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
236 sreg->attr.fields.dpl = sreg->sel & 3;
237 sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
238 sreg->attr.fields.dpl = sreg->sel & 3;
239 sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
240 sreg->attr.fields.dpl = sreg->sel & 3;
241 hvmemul_ctxt.seg_reg_dirty |=
242 (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
243 (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
244 }
246 hvm_emulate_writeback(&hvmemul_ctxt);
248 /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
249 if ( intr_info & INTR_INFO_VALID_MASK )
250 __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
251 }