Line data Source code
1 : /******************************************************************************
2 : * x86_emulate.h
3 : *
4 : * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 : *
6 : * Copyright (c) 2005-2007 Keir Fraser
7 : * Copyright (c) 2005-2007 XenSource Inc.
8 : *
9 : * This program is free software; you can redistribute it and/or modify
10 : * it under the terms of the GNU General Public License as published by
11 : * the Free Software Foundation; either version 2 of the License, or
12 : * (at your option) any later version.
13 : *
14 : * This program is distributed in the hope that it will be useful,
15 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : * GNU General Public License for more details.
18 : *
19 : * You should have received a copy of the GNU General Public License
20 : * along with this program; If not, see <http://www.gnu.org/licenses/>.
21 : */
22 :
23 : #ifndef __X86_EMULATE_H__
24 : #define __X86_EMULATE_H__
25 :
26 : #define MAX_INST_LEN 15
27 :
28 : struct x86_emulate_ctxt;
29 :
30 : /*
31 : * Comprehensive enumeration of x86 segment registers. Various bits of code
32 : * rely on this order (general purpose before system, tr at the beginning of
33 : * system).
34 : */
35 : enum x86_segment {
36 : /* General purpose. Matches the SReg3 encoding in opcode/ModRM bytes. */
37 : x86_seg_es,
38 : x86_seg_cs,
39 : x86_seg_ss,
40 : x86_seg_ds,
41 : x86_seg_fs,
42 : x86_seg_gs,
43 : /* System: Valid to use for implicit table references. */
44 : x86_seg_tr,
45 : x86_seg_ldtr,
46 : x86_seg_gdtr,
47 : x86_seg_idtr,
48 : /* No Segment: For accesses which are already linear. */
49 : x86_seg_none
50 : };
51 :
52 2697139 : static inline bool is_x86_user_segment(enum x86_segment seg)
53 : {
54 2697139 : unsigned int idx = seg;
55 :
56 2697139 : return idx <= x86_seg_gs;
57 : }
58 34652 : static inline bool is_x86_system_segment(enum x86_segment seg)
59 : {
60 34652 : return seg >= x86_seg_tr && seg < x86_seg_none;
61 : }
62 :
63 : /*
64 : * x86 event types. This enumeration is valid for:
65 : * Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
66 : * AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
67 : */
68 : enum x86_event_type {
69 : X86_EVENTTYPE_EXT_INTR, /* External interrupt */
70 : X86_EVENTTYPE_NMI = 2, /* NMI */
71 : X86_EVENTTYPE_HW_EXCEPTION, /* Hardware exception */
72 : X86_EVENTTYPE_SW_INTERRUPT, /* Software interrupt (CD nn) */
73 : X86_EVENTTYPE_PRI_SW_EXCEPTION, /* ICEBP (F1) */
74 : X86_EVENTTYPE_SW_EXCEPTION, /* INT3 (CC), INTO (CE) */
75 : };
76 : #define X86_EVENT_NO_EC (-1) /* No error code. */
77 :
78 : struct x86_event {
79 : int16_t vector;
80 : uint8_t type; /* X86_EVENTTYPE_* */
81 : uint8_t insn_len; /* Instruction length */
82 : int32_t error_code; /* X86_EVENT_NO_EC if n/a */
83 : unsigned long cr2; /* Only for TRAP_page_fault h/w exception */
84 : };
85 :
86 : /*
87 : * Attributes for a segment selector. This is a copy of bits 40:47 & 52:55 of the
88 : * segment descriptor. It happens to match the format of an AMD SVM VMCB.
89 : */
90 : typedef union segment_attributes {
91 : uint16_t bytes;
92 : struct
93 : {
94 : uint16_t type:4; /* 0; Bit 40-43 */
95 : uint16_t s: 1; /* 4; Bit 44 */
96 : uint16_t dpl: 2; /* 5; Bit 45-46 */
97 : uint16_t p: 1; /* 7; Bit 47 */
98 : uint16_t avl: 1; /* 8; Bit 52 */
99 : uint16_t l: 1; /* 9; Bit 53 */
100 : uint16_t db: 1; /* 10; Bit 54 */
101 : uint16_t g: 1; /* 11; Bit 55 */
102 : uint16_t pad: 4;
103 : } fields;
104 : } segment_attributes_t;
105 :
106 : /*
107 : * Full state of a segment register (visible and hidden portions).
108 : * Again, this happens to match the format of an AMD SVM VMCB.
109 : */
110 : struct segment_register {
111 : uint16_t sel;
112 : segment_attributes_t attr;
113 : uint32_t limit;
114 : uint64_t base;
115 : };
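/*
 * Illustrative sketch only (not part of the interface): how a caller might
 * populate a flat 32-bit ring-0 code segment.  The helper name and the
 * selector value are assumptions made for this example.
 */
static inline void x86emul_eg_flat_cs(struct segment_register *reg)
{
    reg->sel   = 0x0008;      /* hypothetical GDT slot 1, RPL 0 */
    reg->base  = 0;
    reg->limit = 0xffffffff;  /* byte-granular limit covering 4GiB */

    reg->attr.bytes       = 0;
    reg->attr.fields.type = 0xb; /* code: execute/read, accessed */
    reg->attr.fields.s    = 1;   /* code/data descriptor */
    reg->attr.fields.dpl  = 0;
    reg->attr.fields.p    = 1;   /* present */
    reg->attr.fields.l    = 0;   /* not a 64-bit code segment */
    reg->attr.fields.db   = 1;   /* 32-bit default operand size */
    reg->attr.fields.g    = 1;   /* descriptor limit scaled by 4KiB */
}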
116 :
117 : struct x86_emul_fpu_aux {
118 : unsigned long ip, dp;
119 : uint16_t cs, ds;
120 : unsigned int op:11;
121 : unsigned int dval:1;
122 : };
123 :
124 : /*
125 : * Return codes from state-accessor functions and from x86_emulate().
126 : */
127 : /* Completed successfully. State modified appropriately. */
128 : #define X86EMUL_OKAY 0
129 : /* Unhandleable access or emulation. No state modified. */
130 : #define X86EMUL_UNHANDLEABLE 1
131 : /* Exception raised and requires delivery. */
132 : #define X86EMUL_EXCEPTION 2
133 : /* Retry the emulation for some reason. No state modified. */
134 : #define X86EMUL_RETRY 3
135 : /*
136 : * Operation fully done by one of the hooks:
137 : * - validate(): operation completed (except common insn retire logic)
138 : * - read_segment(x86_seg_tr, ...): bypass I/O bitmap access
139 : * - read_io() / write_io(): bypass GPR update (non-string insns only)
140 : * Undefined behavior when used anywhere else.
141 : */
142 : #define X86EMUL_DONE 4
143 :
144 : /* FPU sub-types which may be requested via ->get_fpu(). */
145 : enum x86_emulate_fpu_type {
146 : X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */
147 : X86EMUL_FPU_wait, /* WAIT/FWAIT instruction */
148 : X86EMUL_FPU_mmx, /* MMX instruction set (%mm0-%mm7) */
149 : X86EMUL_FPU_xmm, /* SSE instruction set (%xmm0-%xmm7/15) */
150 : X86EMUL_FPU_ymm, /* AVX/XOP instruction set (%ymm0-%ymm7/15) */
151 : /* This sentinel will never be passed to ->get_fpu(). */
152 : X86EMUL_FPU_none
153 : };
154 :
155 : struct cpuid_leaf
156 : {
157 : uint32_t a, b, c, d;
158 : };
159 :
160 : struct x86_emulate_state;
161 :
162 : /*
163 : * These operations represent the instruction emulator's interface to memory,
164 : * I/O ports, privileged state... pretty much everything other than GPRs.
165 : *
166 : * NOTES:
167 : * 1. If the access fails (cannot emulate, or a standard access faults) then
168 : * it is up to the memop to propagate the fault to the guest VM via
169 : * some out-of-band mechanism, unknown to the emulator. The memop signals
170 : * failure by returning X86EMUL_EXCEPTION to the emulator, which will
171 : * then immediately bail.
172 : * 2. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
173 : */
174 : struct x86_emulate_ops
175 : {
176 : /*
177 : * All functions:
178 : * @ctxt: [IN ] Emulation context info as passed to the emulator.
179 : * All memory-access functions:
180 : * @seg: [IN ] Segment being dereferenced (specified as x86_seg_??).
181 : * @offset:[IN ] Offset within segment.
182 : * @p_data:[IN ] Pointer to i/o data buffer (length is @bytes)
183 : * Read functions:
184 : * @val: [OUT] Value read, zero-extended to 'ulong'.
185 : * Write functions:
186 : * @val: [IN ] Value to write (low-order bytes used as req'd).
187 : * Variable-length access functions:
188 : * @bytes: [IN ] Number of bytes to read or write. Valid access sizes are
189 : * 1, 2, 4 and 8 (x86/64 only) bytes, unless otherwise
190 : * stated.
191 : */
192 :
193 : /*
194 : * read: Emulate a memory read.
195 : * @bytes: Access length (0 < @bytes < 4096).
196 : */
197 : int (*read)(
198 : enum x86_segment seg,
199 : unsigned long offset,
200 : void *p_data,
201 : unsigned int bytes,
202 : struct x86_emulate_ctxt *ctxt);
203 :
204 : /*
205 : * insn_fetch: Emulate fetch from instruction byte stream.
206 : * Except for @bytes, all parameters are the same as for 'read'.
207 : * @bytes: Access length (0 <= @bytes < 16, with zero meaning
208 : * "validate address only").
209 : * @seg is always x86_seg_cs.
210 : */
211 : int (*insn_fetch)(
212 : enum x86_segment seg,
213 : unsigned long offset,
214 : void *p_data,
215 : unsigned int bytes,
216 : struct x86_emulate_ctxt *ctxt);
217 :
218 : /*
219 : * write: Emulate a memory write.
220 : * @bytes: Access length (0 < @bytes < 4096).
221 : */
222 : int (*write)(
223 : enum x86_segment seg,
224 : unsigned long offset,
225 : void *p_data,
226 : unsigned int bytes,
227 : struct x86_emulate_ctxt *ctxt);
228 :
229 : /*
230 : * cmpxchg: Emulate an atomic (LOCKed) CMPXCHG operation.
231 : * @p_old: [IN ] Pointer to value expected to be current at @addr.
232 : * @p_new: [IN ] Pointer to value to write to @addr.
233 : * @bytes: [IN ] Operation size (up to 8 (x86/32) or 16 (x86/64) bytes).
234 : */
235 : int (*cmpxchg)(
236 : enum x86_segment seg,
237 : unsigned long offset,
238 : void *p_old,
239 : void *p_new,
240 : unsigned int bytes,
241 : struct x86_emulate_ctxt *ctxt);
242 :
243 : /*
244 : * validate: Post-decode, pre-emulate hook to allow caller controlled
245 : * filtering.
246 : */
247 : int (*validate)(
248 : const struct x86_emulate_state *state,
249 : struct x86_emulate_ctxt *ctxt);
250 :
251 : /*
252 : * rep_ins: Emulate INS: <src_port> -> <dst_seg:dst_offset>.
253 : * @bytes_per_rep: [IN ] Bytes transferred per repetition.
254 : * @reps: [IN ] Maximum repetitions to be emulated.
255 : * [OUT] Number of repetitions actually emulated.
256 : */
257 : int (*rep_ins)(
258 : uint16_t src_port,
259 : enum x86_segment dst_seg,
260 : unsigned long dst_offset,
261 : unsigned int bytes_per_rep,
262 : unsigned long *reps,
263 : struct x86_emulate_ctxt *ctxt);
264 :
265 : /*
266 : * rep_outs: Emulate OUTS: <src_seg:src_offset> -> <dst_port>.
267 : * @bytes_per_rep: [IN ] Bytes transferred per repetition.
268 : * @reps: [IN ] Maximum repetitions to be emulated.
269 : * [OUT] Number of repetitions actually emulated.
270 : */
271 : int (*rep_outs)(
272 : enum x86_segment src_seg,
273 : unsigned long src_offset,
274 : uint16_t dst_port,
275 : unsigned int bytes_per_rep,
276 : unsigned long *reps,
277 : struct x86_emulate_ctxt *ctxt);
278 :
279 : /*
280 : * rep_movs: Emulate MOVS: <src_seg:src_offset> -> <dst_seg:dst_offset>.
281 : * @bytes_per_rep: [IN ] Bytes transferred per repetition.
282 : * @reps: [IN ] Maximum repetitions to be emulated.
283 : * [OUT] Number of repetitions actually emulated.
284 : */
285 : int (*rep_movs)(
286 : enum x86_segment src_seg,
287 : unsigned long src_offset,
288 : enum x86_segment dst_seg,
289 : unsigned long dst_offset,
290 : unsigned int bytes_per_rep,
291 : unsigned long *reps,
292 : struct x86_emulate_ctxt *ctxt);
293 :
294 : /*
295 : * rep_stos: Emulate STOS: <*p_data> -> <seg:offset>.
296 : * @bytes_per_rep: [IN ] Bytes transferred per repetition.
297 : * @reps: [IN ] Maximum repetitions to be emulated.
298 : * [OUT] Number of repetitions actually emulated.
299 : */
300 : int (*rep_stos)(
301 : void *p_data,
302 : enum x86_segment seg,
303 : unsigned long offset,
304 : unsigned int bytes_per_rep,
305 : unsigned long *reps,
306 : struct x86_emulate_ctxt *ctxt);
307 :
308 : /*
309 : * read_segment: Emulate a read of full context of a segment register.
310 : * @reg: [OUT] Contents of segment register (visible and hidden state).
311 : */
312 : int (*read_segment)(
313 : enum x86_segment seg,
314 : struct segment_register *reg,
315 : struct x86_emulate_ctxt *ctxt);
316 :
317 : /*
318 : * write_segment: Emulate a write of full context of a segment register.
319 : * @reg: [IN ] Contents of segment register (visible and hidden state).
320 : */
321 : int (*write_segment)(
322 : enum x86_segment seg,
323 : const struct segment_register *reg,
324 : struct x86_emulate_ctxt *ctxt);
325 :
326 : /*
327 : * read_io: Read from I/O port(s).
328 : * @port: [IN ] Base port for access.
329 : */
330 : int (*read_io)(
331 : unsigned int port,
332 : unsigned int bytes,
333 : unsigned long *val,
334 : struct x86_emulate_ctxt *ctxt);
335 :
336 : /*
337 : * write_io: Write to I/O port(s).
338 : * @port: [IN ] Base port for access.
339 : */
340 : int (*write_io)(
341 : unsigned int port,
342 : unsigned int bytes,
343 : unsigned long val,
344 : struct x86_emulate_ctxt *ctxt);
345 :
346 : /*
347 : * read_cr: Read from control register.
348 : * @reg: [IN ] Register to read (0-15).
349 : */
350 : int (*read_cr)(
351 : unsigned int reg,
352 : unsigned long *val,
353 : struct x86_emulate_ctxt *ctxt);
354 :
355 : /*
356 : * write_cr: Write to control register.
357 : * @reg: [IN ] Register to write (0-15).
358 : */
359 : int (*write_cr)(
360 : unsigned int reg,
361 : unsigned long val,
362 : struct x86_emulate_ctxt *ctxt);
363 :
364 : /*
365 : * read_dr: Read from debug register.
366 : * @reg: [IN ] Register to read (0-15).
367 : */
368 : int (*read_dr)(
369 : unsigned int reg,
370 : unsigned long *val,
371 : struct x86_emulate_ctxt *ctxt);
372 :
373 : /*
374 : * write_dr: Write to debug register.
375 : * @reg: [IN ] Register to write (0-15).
376 : */
377 : int (*write_dr)(
378 : unsigned int reg,
379 : unsigned long val,
380 : struct x86_emulate_ctxt *ctxt);
381 :
382 : /*
383 : * read_msr: Read from model-specific register.
384 : * @reg: [IN ] Register to read.
385 : */
386 : int (*read_msr)(
387 : unsigned int reg,
388 : uint64_t *val,
389 : struct x86_emulate_ctxt *ctxt);
390 :
391 : /*
392 : * write_msr: Write to model-specific register.
393 : * @reg: [IN ] Register to write.
394 : */
395 : int (*write_msr)(
396 : unsigned int reg,
397 : uint64_t val,
398 : struct x86_emulate_ctxt *ctxt);
399 :
400 : /* wbinvd: Write-back and invalidate cache contents. */
401 : int (*wbinvd)(
402 : struct x86_emulate_ctxt *ctxt);
403 :
404 : /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
405 : int (*cpuid)(
406 : uint32_t leaf,
407 : uint32_t subleaf,
408 : struct cpuid_leaf *res,
409 : struct x86_emulate_ctxt *ctxt);
410 :
411 : /*
412 : * get_fpu: Load emulated environment's FPU state onto processor.
413 : * @exception_callback: On any FPU or SIMD exception, pass control to
414 : * (*exception_callback)(exception_callback_arg, regs).
415 : */
416 : int (*get_fpu)(
417 : void (*exception_callback)(void *, struct cpu_user_regs *),
418 : void *exception_callback_arg,
419 : enum x86_emulate_fpu_type type,
420 : struct x86_emulate_ctxt *ctxt);
421 :
422 : /*
423 : * put_fpu: Relinquish the FPU. Unhook from FPU/SIMD exception handlers.
424 : * The handler, if installed, must be prepared to be called without
425 : * get_fpu() having been called first!
426 : * @backout: Undo updates to the specified register file (can, besides
427 : * X86EMUL_FPU_none, only be X86EMUL_FPU_fpu at present);
428 : * @aux: Packaged up FIP/FDP/FOP values to load into FPU.
429 : */
430 : void (*put_fpu)(
431 : struct x86_emulate_ctxt *ctxt,
432 : enum x86_emulate_fpu_type backout,
433 : const struct x86_emul_fpu_aux *aux);
434 :
435 : /* invlpg: Invalidate paging structures which map addressed byte. */
436 : int (*invlpg)(
437 : enum x86_segment seg,
438 : unsigned long offset,
439 : struct x86_emulate_ctxt *ctxt);
440 :
441 : /* vmfunc: Emulate VMFUNC via given set of EAX/ECX inputs. */
442 : int (*vmfunc)(
443 : struct x86_emulate_ctxt *ctxt);
444 : };
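/*
 * Illustrative sketch only: a minimal ops table backing reads, fetches and
 * writes with a flat scratch buffer.  The names and the buffer size are
 * assumptions of this example; hooks not shown are left NULL and would need
 * filling in before exercising instructions that require them.
 */
static uint8_t x86emul_eg_mem[0x1000];

static int x86emul_eg_read(enum x86_segment seg, unsigned long offset,
                           void *p_data, unsigned int bytes,
                           struct x86_emulate_ctxt *ctxt)
{
    unsigned int i;

    if ( offset >= sizeof(x86emul_eg_mem) ||
         bytes > sizeof(x86emul_eg_mem) - offset )
        return X86EMUL_UNHANDLEABLE;

    for ( i = 0; i < bytes; i++ )
        ((uint8_t *)p_data)[i] = x86emul_eg_mem[offset + i];

    return X86EMUL_OKAY;
}

static int x86emul_eg_write(enum x86_segment seg, unsigned long offset,
                            void *p_data, unsigned int bytes,
                            struct x86_emulate_ctxt *ctxt)
{
    unsigned int i;

    if ( offset >= sizeof(x86emul_eg_mem) ||
         bytes > sizeof(x86emul_eg_mem) - offset )
        return X86EMUL_UNHANDLEABLE;

    for ( i = 0; i < bytes; i++ )
        x86emul_eg_mem[offset + i] = ((const uint8_t *)p_data)[i];

    return X86EMUL_OKAY;
}

static const struct x86_emulate_ops x86emul_eg_ops = {
    .read       = x86emul_eg_read,
    .insn_fetch = x86emul_eg_read, /* code fetched from the same buffer */
    .write      = x86emul_eg_write,
};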
445 :
446 : struct cpu_user_regs;
447 :
448 : struct x86_emulate_ctxt
449 : {
450 : /*
451 : * Input-only state:
452 : */
453 :
454 : /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
455 : unsigned char vendor;
456 :
457 : /* Set this if writes may have side effects. */
458 : bool force_writeback;
459 :
460 : /* Caller data that can be used by x86_emulate_ops' routines. */
461 : void *data;
462 :
463 : /*
464 : * Input/output state:
465 : */
466 :
467 : /* Register state before/after emulation. */
468 : struct cpu_user_regs *regs;
469 :
470 : /* Default address size in current execution mode (16, 32, or 64). */
471 : unsigned int addr_size;
472 :
473 : /* Stack pointer width in bits (16, 32 or 64). */
474 : unsigned int sp_size;
475 :
476 : /* Long mode active? */
477 : bool lma;
478 :
479 : /*
480 : * Output-only state:
481 : */
482 :
483 : /* Canonical opcode (see below) (valid only on X86EMUL_OKAY). */
484 : unsigned int opcode;
485 :
486 : /* Retirement state, set by the emulator (valid only on X86EMUL_OKAY). */
487 : union {
488 : uint8_t raw;
489 : struct {
490 : bool hlt:1; /* Instruction HLTed. */
491 : bool mov_ss:1; /* Instruction sets MOV-SS irq shadow. */
492 : bool sti:1; /* Instruction sets STI irq shadow. */
493 : bool singlestep:1; /* Singlestepping was active. */
494 : };
495 : } retire;
496 :
497 : bool event_pending;
498 : struct x86_event event;
499 : };
500 :
501 : /*
502 : * Encode opcode extensions in the following way:
503 : * 0x0xxxx for one byte opcodes
504 : * 0x0fxxxx for 0f-prefixed opcodes (or their VEX/EVEX equivalents)
505 : * 0x0f38xxxx for 0f38-prefixed opcodes (or their VEX/EVEX equivalents)
506 : * 0x0f3axxxx for 0f3a-prefixed opcodes (or their VEX/EVEX equivalents)
507 : * 0x8f08xxxx for 8f/8-prefixed XOP opcodes
508 : * 0x8f09xxxx for 8f/9-prefixed XOP opcodes
509 : * 0x8f0axxxx for 8f/a-prefixed XOP opcodes
510 : * The low byte represents the base opcode within the respective space,
511 : * and some of bits 8..15 are used for encoding further information (see
512 : * below).
513 : * Hence no separate #define-s get added.
514 : */
515 : #define X86EMUL_OPC_EXT_MASK 0xffff0000
516 : #define X86EMUL_OPC(ext, byte) ((uint8_t)(byte) | \
517 : MASK_INSR((ext), X86EMUL_OPC_EXT_MASK))
518 : /*
519 : * This includes the 66, F3, and F2 prefixes (see also below)
520 : * as well as VEX/EVEX:
521 : */
522 : #define X86EMUL_OPC_MASK (0x000000ff | X86EMUL_OPC_PFX_MASK | \
523 : X86EMUL_OPC_ENCODING_MASK)
524 :
525 : /*
526 : * Note that prefixes 66, F2, and F3 get encoded only when semantically
527 : * meaningful, to reduce the complexity of interpreting this representation.
528 : */
529 : #define X86EMUL_OPC_PFX_MASK 0x00000300
530 : # define X86EMUL_OPC_66(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000100)
531 : # define X86EMUL_OPC_F3(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000200)
532 : # define X86EMUL_OPC_F2(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000300)
533 :
534 : #define X86EMUL_OPC_ENCODING_MASK 0x00003000
535 : #define X86EMUL_OPC_LEGACY_ 0x00000000
536 : #define X86EMUL_OPC_VEX_ 0x00001000
537 : # define X86EMUL_OPC_VEX(ext, byte) \
538 : (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_VEX_)
539 : # define X86EMUL_OPC_VEX_66(ext, byte) \
540 : (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_VEX_)
541 : # define X86EMUL_OPC_VEX_F3(ext, byte) \
542 : (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_VEX_)
543 : # define X86EMUL_OPC_VEX_F2(ext, byte) \
544 : (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_VEX_)
545 : #define X86EMUL_OPC_EVEX_ 0x00002000
546 : # define X86EMUL_OPC_EVEX(ext, byte) \
547 : (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_EVEX_)
548 : # define X86EMUL_OPC_EVEX_66(ext, byte) \
549 : (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_EVEX_)
550 : # define X86EMUL_OPC_EVEX_F3(ext, byte) \
551 : (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_EVEX_)
552 : # define X86EMUL_OPC_EVEX_F2(ext, byte) \
553 : (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_EVEX_)
554 :
555 : #define X86EMUL_OPC_XOP(ext, byte) X86EMUL_OPC(0x8f##ext, byte)
556 : #define X86EMUL_OPC_XOP_66(ext, byte) X86EMUL_OPC_66(0x8f##ext, byte)
557 : #define X86EMUL_OPC_XOP_F3(ext, byte) X86EMUL_OPC_F3(0x8f##ext, byte)
558 : #define X86EMUL_OPC_XOP_F2(ext, byte) X86EMUL_OPC_F2(0x8f##ext, byte)
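/*
 * Worked example of the canonical encoding above (values assume the
 * MASK_INSR() definition used elsewhere in Xen, which places @ext in
 * bits 16-31):
 *     X86EMUL_OPC(0x0f, 0x6f)        == 0x000f006f  (movq mm,mm/m64)
 *     X86EMUL_OPC_66(0x0f, 0x6f)     == 0x000f016f  (movdqa xmm,xmm/m128)
 *     X86EMUL_OPC_VEX_66(0x0f, 0x6f) == 0x000f116f  (vmovdqa xmm,xmm/m128)
 */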
559 :
560 : struct x86_emulate_stub {
561 : union {
562 : void (*func)(void);
563 : uintptr_t addr;
564 : };
565 : #ifdef __XEN__
566 : void *ptr;
567 : #else
568 : /* Room for one insn and a (single byte) RET. */
569 : uint8_t buf[MAX_INST_LEN + 1];
570 : #endif
571 : };
572 :
573 : /*
574 : * x86_emulate: Emulate an instruction.
575 : * Returns X86EMUL_* constants.
576 : */
577 : int
578 : x86_emulate(
579 : struct x86_emulate_ctxt *ctxt,
580 : const struct x86_emulate_ops *ops);
581 :
582 : #ifndef NDEBUG
583 : /*
584 : * In debug builds, wrap x86_emulate() with some assertions about its expected
585 : * behaviour.
586 : */
587 : int x86_emulate_wrapper(
588 : struct x86_emulate_ctxt *ctxt,
589 : const struct x86_emulate_ops *ops);
590 : #define x86_emulate x86_emulate_wrapper
591 : #endif
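/*
 * Illustrative sketch only: driving a single emulation pass.  Everything
 * here (the helper name, the flat 32-bit assumptions, and how a pending
 * event would be consumed) is an assumption made for the example.
 */
static inline int x86emul_eg_emulate_one(struct cpu_user_regs *regs,
                                         const struct x86_emulate_ops *ops)
{
    struct x86_emulate_ctxt ctxt = {
        .regs      = regs,
        .addr_size = 32,   /* flat 32-bit execution assumed */
        .sp_size   = 32,
        .lma       = false,
    };
    int rc = x86_emulate(&ctxt, ops);

    if ( rc == X86EMUL_EXCEPTION )
    {
        /* ctxt.event now describes the exception to deliver to the guest. */
    }

    return rc;
}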
592 :
593 : /*
594 : * Given the 'reg' portion of a ModRM byte, and a register block, return a
595 : * pointer into the block that addresses the relevant register.
596 : * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
597 : */
598 : void *
599 : decode_register(
600 : uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);
601 :
602 : /* Unhandleable read, write or instruction fetch */
603 : int
604 : x86emul_unhandleable_rw(
605 : enum x86_segment seg,
606 : unsigned long offset,
607 : void *p_data,
608 : unsigned int bytes,
609 : struct x86_emulate_ctxt *ctxt);
610 :
611 : #ifdef __XEN__
612 :
613 : struct x86_emulate_state *
614 : x86_decode_insn(
615 : struct x86_emulate_ctxt *ctxt,
616 : int (*insn_fetch)(
617 : enum x86_segment seg, unsigned long offset,
618 : void *p_data, unsigned int bytes,
619 : struct x86_emulate_ctxt *ctxt));
620 :
621 : unsigned int
622 : x86_insn_opsize(const struct x86_emulate_state *state);
623 : int
624 : x86_insn_modrm(const struct x86_emulate_state *state,
625 : unsigned int *rm, unsigned int *reg);
626 : unsigned long
627 : x86_insn_operand_ea(const struct x86_emulate_state *state,
628 : enum x86_segment *seg);
629 : unsigned long
630 : x86_insn_immediate(const struct x86_emulate_state *state,
631 : unsigned int nr);
632 : unsigned int
633 : x86_insn_length(const struct x86_emulate_state *state,
634 : const struct x86_emulate_ctxt *ctxt);
635 : bool
636 : x86_insn_is_mem_access(const struct x86_emulate_state *state,
637 : const struct x86_emulate_ctxt *ctxt);
638 : bool
639 : x86_insn_is_mem_write(const struct x86_emulate_state *state,
640 : const struct x86_emulate_ctxt *ctxt);
641 : bool
642 : x86_insn_is_portio(const struct x86_emulate_state *state,
643 : const struct x86_emulate_ctxt *ctxt);
644 : bool
645 : x86_insn_is_cr_access(const struct x86_emulate_state *state,
646 : const struct x86_emulate_ctxt *ctxt);
647 :
648 : #ifdef NDEBUG
649 : static inline void x86_emulate_free_state(struct x86_emulate_state *state) {}
650 : #else
651 : void x86_emulate_free_state(struct x86_emulate_state *state);
652 : #endif
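/*
 * Illustrative sketch only (decode without emulating): obtain an
 * instruction's length.  The helper name is an assumption, and handling of
 * a failed decode is elided for brevity.
 */
static inline unsigned int x86emul_eg_insn_len(
    struct x86_emulate_ctxt *ctxt,
    int (*fetch)(enum x86_segment seg, unsigned long offset,
                 void *p_data, unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt))
{
    struct x86_emulate_state *state = x86_decode_insn(ctxt, fetch);
    unsigned int len = x86_insn_length(state, ctxt);

    x86_emulate_free_state(state);

    return len;
}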
653 :
654 : #endif
655 :
656 16036 : static inline void x86_emul_hw_exception(
657 : unsigned int vector, int error_code, struct x86_emulate_ctxt *ctxt)
658 : {
659 16036 : ASSERT(!ctxt->event_pending);
660 :
661 16036 : ctxt->event.vector = vector;
662 16036 : ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
663 16036 : ctxt->event.error_code = error_code;
664 :
665 16036 : ctxt->event_pending = true;
666 16036 : }
667 :
668 14809 : static inline void x86_emul_pagefault(
669 : int error_code, unsigned long cr2, struct x86_emulate_ctxt *ctxt)
670 : {
671 14809 : ASSERT(!ctxt->event_pending);
672 :
673 14809 : ctxt->event.vector = 14; /* TRAP_page_fault */
674 14809 : ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
675 14809 : ctxt->event.error_code = error_code;
676 14809 : ctxt->event.cr2 = cr2;
677 :
678 14809 : ctxt->event_pending = true;
679 14809 : }
680 :
681 3899941 : static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
682 : {
683 3899941 : ctxt->event_pending = false;
684 3899941 : ctxt->event = (struct x86_event){};
685 3899941 : }
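/*
 * Illustrative sketch only: a read hook reporting a translation failure.
 * The hook name and the error code chosen are assumptions of the example,
 * and @offset is treated as if it were already a linear address.
 */
static inline int x86emul_eg_read_faulting(
    enum x86_segment seg, unsigned long offset, void *p_data,
    unsigned int bytes, struct x86_emulate_ctxt *ctxt)
{
    /* Register a user-mode, not-present, read #PF against the address... */
    x86_emul_pagefault(0x04 /* user-mode access */, offset, ctxt);

    /* ...and have the emulator bail so the caller can deliver ctxt->event. */
    return X86EMUL_EXCEPTION;
}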
686 :
687 : #endif /* __X86_EMULATE_H__ */