} while (0)
#define register_address_adjust(reg, adj) \
_register_address_increment(reg, \
- _regs.eflags & EFLG_DF ? -(adj) : (adj), \
+ _regs._eflags & EFLG_DF ? -(adj) : (adj), \
ad_bytes)
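/*
 * For illustration: EFLG_DF is the direction flag, so string operations
 * advance the index register when DF is clear and retreat it when DF is
 * set, e.g. +4 vs -4 per iteration for a 4-byte movsd.
 */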
#define sp_pre_dec(dec) ({ \
if ( type >= X86EMUL_FPU_ymm )
{
/* Should be unreachable if VEX decoding is working correctly. */
- ASSERT((cr0 & CR0_PE) && !(ctxt->regs->eflags & EFLG_VM));
+ ASSERT((cr0 & CR0_PE) && !(ctxt->regs->_eflags & EFLG_VM));
}
if ( cr0 & CR0_EM )
{
asm volatile ( _PRE_EFLAGS("[eflags]", "[mask]", "[tmp]") \
"call *%[func];" \
_POST_EFLAGS("[eflags]", "[mask]", "[tmp]") \
- : [eflags] "+g" (_regs.eflags), \
+ : [eflags] "+g" (_regs._eflags), \
[tmp] "=&r" (tmp_) \
: [func] "rm" (stub.func), \
[mask] "i" (EFLG_ZF|EFLG_PF|EFLG_CF) ); \
const struct cpu_user_regs *regs,
int ad_bytes)
{
- return (ad_bytes == 2) ? (uint16_t)regs->ecx :
- (ad_bytes == 4) ? (uint32_t)regs->ecx :
- regs->ecx;
+ return (ad_bytes > 4) ? regs->r(cx)
+ : (ad_bytes < 4) ? regs->cx : regs->_ecx;
}
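/*
 * For illustration, the register-view naming used above: r(cx) is the
 * full natural-width register, _ecx the 32-bit view, and cx the 16-bit
 * one (with cl/ch the byte views), so each read truncates to the
 * address size without explicit casts.
 */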
static inline void put_loop_count(
unsigned long count)
{
if ( ad_bytes == 2 )
- *(uint16_t *)&regs->ecx = count;
+ regs->cx = count;
else
- regs->ecx = ad_bytes == 4 ? (uint32_t)count : count;
+ regs->r(cx) = ad_bytes == 4 ? (uint32_t)count : count;
}
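/*
 * For illustration: a 16-bit store leaves the upper bits of rCX intact,
 * while with ad_bytes == 4 the uint32_t cast above relies on the
 * full-width assignment zero-extending, matching hardware behaviour for
 * 32-bit register writes in 64-bit mode.
 */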
#define get_rep_prefix(using_si, using_di) ({ \
*/ \
if ( mode_64bit() && ad_bytes == 4 ) \
{ \
- _regs.ecx = 0; \
- if ( using_si ) _regs.esi = (uint32_t)_regs.esi; \
- if ( using_di ) _regs.edi = (uint32_t)_regs.edi; \
+ _regs.r(cx) = 0; \
+ if ( using_si ) _regs.r(si) = _regs._esi; \
+ if ( using_di ) _regs.r(di) = _regs._edi; \
} \
goto no_writeback; \
} \
/* Clip maximum repetitions so that the index register at most just wraps. */
#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({ \
unsigned long todo__, ea__ = truncate_word(ea, ad_bytes); \
- if ( !(ctxt->regs->eflags & EFLG_DF) ) \
+ if ( !(_regs._eflags & EFLG_DF) ) \
todo__ = truncate_word(-(ea), ad_bytes) / (bytes_per_rep); \
else if ( truncate_word((ea) + (bytes_per_rep) - 1, ad_bytes) < ea__ )\
todo__ = 1; \
{
struct segment_register reg;
- if ( ctxt->regs->eflags & EFLG_VM )
+ if ( ctxt->regs->_eflags & EFLG_VM )
return 3;
if ( (ops->read_segment == NULL) ||
int cpl = get_cpl(ctxt, ops);
if ( cpl == -1 )
return -1;
- return (cpl <= ((ctxt->regs->eflags >> 12) & 3));
+ return (cpl <= ((ctxt->regs->_eflags >> 12) & 3));
}
#define mode_ring0() ({ \
struct segment_register tr;
int rc = X86EMUL_OKAY;
- if ( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() )
+ if ( !(ctxt->regs->_eflags & EFLG_VM) && mode_iopl() )
return X86EMUL_OKAY;
fail_if(ops->read_segment == NULL);
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- return !(in_realmode(ctxt, ops) || (ctxt->regs->eflags & EFLG_VM));
+ return !(in_realmode(ctxt, ops) || (ctxt->regs->_eflags & EFLG_VM));
}
#define EAX 0
switch ( modrm_reg )
{
- case 0: p = &regs->eax; break;
- case 1: p = &regs->ecx; break;
- case 2: p = &regs->edx; break;
- case 3: p = &regs->ebx; break;
- case 4: p = (highbyte_regs ?
- ((unsigned char *)&regs->eax + 1) :
- (unsigned char *)&regs->esp); break;
- case 5: p = (highbyte_regs ?
- ((unsigned char *)&regs->ecx + 1) :
- (unsigned char *)&regs->ebp); break;
- case 6: p = (highbyte_regs ?
- ((unsigned char *)&regs->edx + 1) :
- (unsigned char *)&regs->esi); break;
- case 7: p = (highbyte_regs ?
- ((unsigned char *)&regs->ebx + 1) :
- (unsigned char *)&regs->edi); break;
+ case 0: p = &regs->r(ax); break;
+ case 1: p = &regs->r(cx); break;
+ case 2: p = &regs->r(dx); break;
+ case 3: p = &regs->r(bx); break;
+ case 4: p = (highbyte_regs ? &regs->ah : (void *)&regs->r(sp)); break;
+ case 5: p = (highbyte_regs ? &regs->ch : (void *)&regs->r(bp)); break;
+ case 6: p = (highbyte_regs ? &regs->dh : (void *)&regs->r(si)); break;
+ case 7: p = (highbyte_regs ? &regs->bh : (void *)&regs->r(di)); break;
#if defined(__x86_64__)
case 8: p = &regs->r8; break;
case 9: p = &regs->r9; break;
* a 32-bit OS. Someone with many TUITs can see about reading the
* TSS Software Interrupt Redirection bitmap.
*/
- if ( (ctxt->regs->eflags & EFLG_VM) &&
- ((ctxt->regs->eflags & EFLG_IOPL) != EFLG_IOPL) )
+ if ( (ctxt->regs->_eflags & EFLG_VM) &&
+ ((ctxt->regs->_eflags & EFLG_IOPL) != EFLG_IOPL) )
goto raise_exn;
/*
default:
BUG(); /* Shouldn't be possible. */
case 2:
- if ( in_realmode(ctxt, ops) || (state->regs->eflags & EFLG_VM) )
+ if ( in_realmode(ctxt, ops) || (state->regs->_eflags & EFLG_VM) )
break;
/* fall through */
case 4:
switch ( modrm_rm )
{
case 0:
- ea.mem.off = state->regs->ebx + state->regs->esi;
+ ea.mem.off = state->regs->bx + state->regs->si;
break;
case 1:
- ea.mem.off = state->regs->ebx + state->regs->edi;
+ ea.mem.off = state->regs->bx + state->regs->di;
break;
case 2:
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp + state->regs->esi;
+ ea.mem.off = state->regs->bp + state->regs->si;
break;
case 3:
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp + state->regs->edi;
+ ea.mem.off = state->regs->bp + state->regs->di;
break;
case 4:
- ea.mem.off = state->regs->esi;
+ ea.mem.off = state->regs->si;
break;
case 5:
- ea.mem.off = state->regs->edi;
+ ea.mem.off = state->regs->di;
break;
case 6:
if ( modrm_mod == 0 )
break;
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp;
+ ea.mem.off = state->regs->bp;
break;
case 7:
- ea.mem.off = state->regs->ebx;
+ ea.mem.off = state->regs->bx;
break;
}
switch ( modrm_mod )
struct x86_emulate_state state;
int rc;
uint8_t b, d;
- bool singlestep = ctxt->regs->eflags & EFLG_TF;
+ bool singlestep = _regs._eflags & EFLG_TF;
struct operand src = { .reg = PTR_POISON };
struct operand dst = { .reg = PTR_POISON };
enum x86_swint_type swint_type;
struct segment_register cs, sreg;
case 0x00 ... 0x05: add: /* add */
- emulate_2op_SrcV("add", src, dst, _regs.eflags);
+ emulate_2op_SrcV("add", src, dst, _regs._eflags);
break;
case 0x08 ... 0x0d: or: /* or */
- emulate_2op_SrcV("or", src, dst, _regs.eflags);
+ emulate_2op_SrcV("or", src, dst, _regs._eflags);
break;
case 0x10 ... 0x15: adc: /* adc */
- emulate_2op_SrcV("adc", src, dst, _regs.eflags);
+ emulate_2op_SrcV("adc", src, dst, _regs._eflags);
break;
case 0x18 ... 0x1d: sbb: /* sbb */
- emulate_2op_SrcV("sbb", src, dst, _regs.eflags);
+ emulate_2op_SrcV("sbb", src, dst, _regs._eflags);
break;
case 0x20 ... 0x25: and: /* and */
- emulate_2op_SrcV("and", src, dst, _regs.eflags);
+ emulate_2op_SrcV("and", src, dst, _regs._eflags);
break;
case 0x28 ... 0x2d: sub: /* sub */
- emulate_2op_SrcV("sub", src, dst, _regs.eflags);
+ emulate_2op_SrcV("sub", src, dst, _regs._eflags);
break;
case 0x30 ... 0x35: xor: /* xor */
- emulate_2op_SrcV("xor", src, dst, _regs.eflags);
+ emulate_2op_SrcV("xor", src, dst, _regs._eflags);
break;
case 0x38 ... 0x3d: cmp: /* cmp */
generate_exception_if(lock_prefix, EXC_UD);
- emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
+ emulate_2op_SrcV("cmp", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
case 0x27: /* daa */
case 0x2f: /* das */ {
- uint8_t al = _regs.eax;
- unsigned long eflags = _regs.eflags;
+ uint8_t al = _regs.al;
+ unsigned int eflags = _regs._eflags;
generate_exception_if(mode_64bit(), EXC_UD);
- _regs.eflags &= ~(EFLG_CF|EFLG_AF|EFLG_SF|EFLG_ZF|EFLG_PF);
+ _regs._eflags &= ~(EFLG_CF|EFLG_AF|EFLG_SF|EFLG_ZF|EFLG_PF);
if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
{
- _regs.eflags |= EFLG_AF;
+ _regs._eflags |= EFLG_AF;
if ( b == 0x2f && (al < 6 || (eflags & EFLG_CF)) )
- _regs.eflags |= EFLG_CF;
- *(uint8_t *)&_regs.eax += (b == 0x27) ? 6 : -6;
+ _regs._eflags |= EFLG_CF;
+ _regs.al += (b == 0x27) ? 6 : -6;
}
if ( (al > 0x99) || (eflags & EFLG_CF) )
{
- *(uint8_t *)&_regs.eax += (b == 0x27) ? 0x60 : -0x60;
- _regs.eflags |= EFLG_CF;
+ _regs.al += (b == 0x27) ? 0x60 : -0x60;
+ _regs._eflags |= EFLG_CF;
}
- _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
- _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0;
- _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
+ _regs._eflags |= !_regs.al ? EFLG_ZF : 0;
+ _regs._eflags |= ((int8_t)_regs.al < 0) ? EFLG_SF : 0;
+ _regs._eflags |= even_parity(_regs.al) ? EFLG_PF : 0;
break;
}
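/*
 * Worked example for the packed-BCD adjustment above: 0x19 + 0x28 =
 * 0x41 with AF set, so daa adds 6 to AL, yielding 0x47, the packed-BCD
 * encoding of decimal 19 + 28 = 47.
 */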
case 0x37: /* aaa */
case 0x3f: /* aas */
generate_exception_if(mode_64bit(), EXC_UD);
- _regs.eflags &= ~EFLG_CF;
- if ( ((uint8_t)_regs.eax > 9) || (_regs.eflags & EFLG_AF) )
+ _regs._eflags &= ~EFLG_CF;
+ if ( (_regs.al > 9) || (_regs._eflags & EFLG_AF) )
{
- ((uint8_t *)&_regs.eax)[0] += (b == 0x37) ? 6 : -6;
- ((uint8_t *)&_regs.eax)[1] += (b == 0x37) ? 1 : -1;
- _regs.eflags |= EFLG_CF | EFLG_AF;
+ _regs.al += (b == 0x37) ? 6 : -6;
+ _regs.ah += (b == 0x37) ? 1 : -1;
+ _regs._eflags |= EFLG_CF | EFLG_AF;
}
- ((uint8_t *)&_regs.eax)[0] &= 0x0f;
+ _regs.al &= 0x0f;
break;
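/*
 * Worked example for the unpacked-BCD adjustment above: AL = 0x0b
 * (e.g. from 5 + 6) exceeds 9, so aaa adds 6 to AL, increments AH, and
 * masks AL to 0x0f, leaving AH:AL = 01:01, i.e. decimal 11.
 */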
case 0x40 ... 0x4f: /* inc/dec reg */
dst.bytes = op_bytes;
dst.val = *dst.reg;
if ( b & 8 )
- emulate_1op("dec", dst, _regs.eflags);
+ emulate_1op("dec", dst, _regs._eflags);
else
- emulate_1op("inc", dst, _regs.eflags);
+ emulate_1op("inc", dst, _regs._eflags);
break;
case 0x50 ... 0x57: /* push reg */
case 0x60: /* pusha */ {
int i;
- unsigned long regs[] = {
- _regs.eax, _regs.ecx, _regs.edx, _regs.ebx,
- _regs.esp, _regs.ebp, _regs.esi, _regs.edi };
+ unsigned int regs[] = {
+ _regs._eax, _regs._ecx, _regs._edx, _regs._ebx,
+ _regs._esp, _regs._ebp, _regs._esi, _regs._edi };
generate_exception_if(mode_64bit(), EXC_UD);
fail_if(!ops->write);
case 0x61: /* popa */ {
int i;
- unsigned long dummy_esp, *regs[] = {
- (unsigned long *)&_regs.edi, (unsigned long *)&_regs.esi,
- (unsigned long *)&_regs.ebp, (unsigned long *)&dummy_esp,
- (unsigned long *)&_regs.ebx, (unsigned long *)&_regs.edx,
- (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
+ unsigned int dummy_esp, *regs[] = {
+ &_regs._edi, &_regs._esi, &_regs._ebp, &dummy_esp,
+ &_regs._ebx, &_regs._edx, &_regs._ecx, &_regs._eax };
+
generate_exception_if(mode_64bit(), EXC_UD);
for ( i = 0; i < 8; i++ )
{
goto done;
if ( src_rpl > (dst.val & 3) )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.val = (dst.val & ~3) | src_rpl;
}
else
{
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
dst.type = OP_NONE;
}
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
unsigned long nr_reps = get_rep_prefix(false, true);
- unsigned int port = (uint16_t)_regs.edx;
+ unsigned int port = _regs.dx;
+
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
dst.mem.seg = x86_seg_es;
dst.mem.off = truncate_ea_and_reps(_regs.r(di), nr_reps, dst.bytes);
case 0x6e ... 0x6f: /* outs %esi,%dx */ {
unsigned long nr_reps = get_rep_prefix(true, false);
- unsigned int port = (uint16_t)_regs.edx;
+ unsigned int port = _regs.dx;
+
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
ea.mem.off = truncate_ea_and_reps(_regs.r(si), nr_reps, dst.bytes);
if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
}
case 0x70 ... 0x7f: /* jcc (short) */
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
jmp_rel((int32_t)src.val);
break;
case 0xa8 ... 0xa9: /* test imm,%%eax */
case 0x84 ... 0x85: test: /* test */
- emulate_2op_SrcV("test", src, dst, _regs.eflags);
+ emulate_2op_SrcV("test", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
case 0x98: /* cbw/cwde/cdqe */
switch ( op_bytes )
{
- case 2: *(int16_t *)&_regs.eax = (int8_t)_regs.eax; break; /* cbw */
- case 4: _regs.eax = (uint32_t)(int16_t)_regs.eax; break; /* cwde */
- case 8: _regs.eax = (int32_t)_regs.eax; break; /* cdqe */
+ case 2: _regs.ax = (int8_t)_regs.al; break; /* cbw */
+ case 4: _regs.r(ax) = (uint32_t)(int16_t)_regs.ax; break; /* cwde */
+ case 8: _regs.r(ax) = (int32_t)_regs._eax; break; /* cdqe */
}
break;
case 0x99: /* cwd/cdq/cqo */
switch ( op_bytes )
{
- case 2:
- *(int16_t *)&_regs.edx = ((int16_t)_regs.eax < 0) ? -1 : 0;
- break;
- case 4:
- _regs.edx = (uint32_t)(((int32_t)_regs.eax < 0) ? -1 : 0);
- break;
-#ifdef __x86_64__ /* compile warning with some versions of 32-bit gcc */
- case 8:
- _regs.rdx = ((int64_t)_regs.rax < 0) ? -1 : 0;
- break;
+ case 2: _regs.dx = -((int16_t)_regs.ax < 0); break;
+ case 4: _regs.r(dx) = (uint32_t)-((int32_t)_regs._eax < 0); break;
+#ifdef __x86_64__
+ case 8: _regs.rdx = -((int64_t)_regs.rax < 0); break;
#endif
}
break;
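/*
 * Note on the idiom above: -((int16_t)x < 0) evaluates to 0 or -1, i.e.
 * the sign bit of rAX replicated across the destination width, which is
 * exactly the rDX value cwd/cdq/cqo produce.
 */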
break;
case 0x9c: /* pushf */
- generate_exception_if((_regs.eflags & EFLG_VM) &&
- MASK_EXTR(_regs.eflags, EFLG_IOPL) != 3,
+ generate_exception_if((_regs._eflags & EFLG_VM) &&
+ MASK_EXTR(_regs._eflags, EFLG_IOPL) != 3,
EXC_GP, 0);
src.val = _regs.r(flags) & ~(EFLG_VM | EFLG_RF);
goto push;
if ( !mode_ring0() )
{
- generate_exception_if((_regs.eflags & EFLG_VM) &&
- MASK_EXTR(_regs.eflags, EFLG_IOPL) != 3,
+ generate_exception_if((_regs._eflags & EFLG_VM) &&
+ MASK_EXTR(_regs._eflags, EFLG_IOPL) != 3,
EXC_GP, 0);
mask |= EFLG_IOPL;
if ( !mode_iopl() )
&dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
if ( op_bytes == 2 )
- dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u);
+ dst.val = (uint16_t)dst.val | (_regs._eflags & 0xffff0000u);
dst.val &= EFLAGS_MODIFIABLE;
- _regs.eflags &= mask;
- _regs.eflags |= (uint32_t)(dst.val & ~mask) | EFLG_MBS;
+ _regs._eflags &= mask;
+ _regs._eflags |= (dst.val & ~mask) | EFLG_MBS;
break;
}
case 0x9e: /* sahf */
if ( mode_64bit() )
vcpu_must_have(lahf_lm);
- *(uint8_t *)&_regs.eflags = (((uint8_t *)&_regs.eax)[1] &
- EFLAGS_MASK) | EFLG_MBS;
+ *(uint8_t *)&_regs._eflags = (_regs.ah & EFLAGS_MASK) | EFLG_MBS;
break;
case 0x9f: /* lahf */
if ( mode_64bit() )
vcpu_must_have(lahf_lm);
- ((uint8_t *)&_regs.eax)[1] = (_regs.eflags & EFLAGS_MASK) | EFLG_MBS;
+ _regs.ah = (_regs._eflags & EFLAGS_MASK) | EFLG_MBS;
break;
case 0xa4 ... 0xa5: /* movs */ {
register_address_adjust(_regs.r(di), src.bytes);
put_rep_prefix(1);
/* cmp: dst - src ==> src=*%%edi,dst=*%%esi ==> *%%esi - *%%edi */
- emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
- if ( (repe_prefix() && !(_regs.eflags & EFLG_ZF)) ||
- (repne_prefix() && (_regs.eflags & EFLG_ZF)) )
+ emulate_2op_SrcV("cmp", src, dst, _regs._eflags);
+ if ( (repe_prefix() && !(_regs._eflags & EFLG_ZF)) ||
+ (repne_prefix() && (_regs._eflags & EFLG_ZF)) )
_regs.r(ip) = next_eip;
break;
}
put_rep_prefix(1);
/* cmp: %%eax - *%%edi ==> src=%%eax,dst=*%%edi ==> src - dst */
dst.bytes = src.bytes;
- emulate_2op_SrcV("cmp", dst, src, _regs.eflags);
- if ( (repe_prefix() && !(_regs.eflags & EFLG_ZF)) ||
- (repne_prefix() && (_regs.eflags & EFLG_ZF)) )
+ emulate_2op_SrcV("cmp", dst, src, _regs._eflags);
+ if ( (repe_prefix() && !(_regs._eflags & EFLG_ZF)) ||
+ (repne_prefix() && (_regs._eflags & EFLG_ZF)) )
_regs.r(ip) = next_eip;
break;
}
switch ( modrm_reg & 7 )
{
case 0: /* rol */
- emulate_2op_SrcB("rol", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rol", src, dst, _regs._eflags);
break;
case 1: /* ror */
- emulate_2op_SrcB("ror", src, dst, _regs.eflags);
+ emulate_2op_SrcB("ror", src, dst, _regs._eflags);
break;
case 2: /* rcl */
- emulate_2op_SrcB("rcl", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rcl", src, dst, _regs._eflags);
break;
case 3: /* rcr */
- emulate_2op_SrcB("rcr", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rcr", src, dst, _regs._eflags);
break;
case 4: /* sal/shl */
case 6: /* sal/shl */
- emulate_2op_SrcB("sal", src, dst, _regs.eflags);
+ emulate_2op_SrcB("sal", src, dst, _regs._eflags);
break;
case 5: /* shr */
- emulate_2op_SrcB("shr", src, dst, _regs.eflags);
+ emulate_2op_SrcB("shr", src, dst, _regs._eflags);
break;
case 7: /* sar */
- emulate_2op_SrcB("sar", src, dst, _regs.eflags);
+ emulate_2op_SrcB("sar", src, dst, _regs._eflags);
break;
}
break;
case 0xc9: /* leave */
/* First writeback, to %%esp. */
dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
- switch ( dst.bytes )
- {
- case 2: *(uint16_t *)&_regs.esp = (uint16_t)_regs.ebp; break;
- case 4: _regs.esp = (uint32_t)_regs.ebp; break; /* 64b: zero-ext */
- case 8: _regs.esp = _regs.ebp; break;
- }
+ if ( dst.bytes == 2 )
+ _regs.sp = _regs.bp;
+ else
+ _regs.r(sp) = dst.bytes == 4 ? _regs._ebp : _regs.r(bp);
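/*
 * Only the 2-byte case must preserve the upper bits of rSP; for the
 * other widths the full register is written, the 32-bit _ebp read
 * zero-extending naturally on assignment to r(sp).
 */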
/* Second writeback, to %%ebp. */
dst.type = OP_REG;
case 0xce: /* into */
generate_exception_if(mode_64bit(), EXC_UD);
- if ( !(_regs.eflags & EFLG_OF) )
+ if ( !(_regs._eflags & EFLG_OF) )
break;
src.val = EXC_OF;
swint_type = x86_swint_into;
&eflags, op_bytes, ctxt, ops)) )
goto done;
if ( op_bytes == 2 )
- eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u);
+ eflags = (uint16_t)eflags | (_regs._eflags & 0xffff0000u);
eflags &= EFLAGS_MODIFIABLE;
- _regs.eflags &= mask;
- _regs.eflags |= (eflags & ~mask) | EFLG_MBS;
+ _regs._eflags &= mask;
+ _regs._eflags |= (eflags & ~mask) | EFLG_MBS;
if ( (rc = load_seg(x86_seg_cs, sel, 1, &cs, ctxt, ops)) ||
(rc = commit_far_branch(&cs, (uint32_t)eip)) )
goto done;
goto grp2;
case 0xd2 ... 0xd3: /* Grp2 */
- src.val = _regs.ecx;
+ src.val = _regs.cl;
goto grp2;
case 0xd4: /* aam */
generate_exception_if(mode_64bit(), EXC_UD);
if ( b & 0x01 )
{
- uint16_t ax = _regs.eax;
+ uint16_t ax = _regs.ax;
- *(uint16_t *)&_regs.eax = (uint8_t)(ax + ((ax >> 8) * base));
+ _regs.ax = (uint8_t)(ax + ((ax >> 8) * base));
}
else
{
- uint8_t al = _regs.eax;
+ uint8_t al = _regs.al;
generate_exception_if(!base, EXC_DE);
- *(uint16_t *)&_regs.eax = ((al / base) << 8) | (al % base);
+ _regs.ax = ((al / base) << 8) | (al % base);
}
- _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
- _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
- _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0;
- _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
+ _regs._eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
+ _regs._eflags |= !_regs.al ? EFLG_ZF : 0;
+ _regs._eflags |= ((int8_t)_regs.al < 0) ? EFLG_SF : 0;
+ _regs._eflags |= even_parity(_regs.al) ? EFLG_PF : 0;
break;
}
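/*
 * Worked example for aam/aad above (default base 10): aam with AL = 63
 * (0x3f) produces AH = 6, AL = 3; aad reverses this, computing
 * AL = AH * 10 + AL and clearing AH.
 */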
case 0xd6: /* salc */
generate_exception_if(mode_64bit(), EXC_UD);
- *(uint8_t *)&_regs.eax = (_regs.eflags & EFLG_CF) ? 0xff : 0x00;
+ _regs.al = (_regs._eflags & EFLG_CF) ? 0xff : 0x00;
break;
case 0xd7: /* xlat */ {
- unsigned long al = (uint8_t)_regs.eax;
- if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.ebx + al),
+ unsigned long al;
+
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(bx) + _regs.al),
&al, 1, ctxt, ops)) != 0 )
goto done;
- *(uint8_t *)&_regs.eax = al;
+ _regs.al = al;
break;
}
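/*
 * For illustration: xlat fetches one byte from rBX + AL; the 1-byte
 * read_ulong() leaves the upper bytes of the temporary zeroed, so
 * assigning it back to AL needs no further masking.
 */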
/* fnstsw %ax */
dst.bytes = 2;
dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
+ dst.reg = (void *)&_regs.ax;
emulate_fpu_insn_memdst("fnstsw", dst.val);
break;
case 0xe8 ... 0xef: /* fucomip %stN */
case 0xe0 ... 0xe2: /* loop{,z,nz} */ {
unsigned long count = get_loop_count(&_regs, ad_bytes);
- int do_jmp = !(_regs.eflags & EFLG_ZF); /* loopnz */
+ int do_jmp = !(_regs._eflags & EFLG_ZF); /* loopnz */
if ( b == 0xe1 )
do_jmp = !do_jmp; /* loopz */
case 0xed: /* in %dx,%eax */
case 0xee: /* out %al,%dx */
case 0xef: /* out %eax,%dx */ {
- unsigned int port = ((b < 0xe8) ? (uint8_t)src.val
- : (uint16_t)_regs.edx);
+ unsigned int port = ((b < 0xe8) ? (uint8_t)src.val : _regs.dx);
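/*
 * Opcodes 0xe4-0xe7 encode an immediate port number while 0xec-0xef
 * take the port from DX; the b < 0xe8 test above distinguishes the two
 * forms.
 */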
op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops)) != 0 )
{
/* out */
fail_if(ops->write_io == NULL);
- rc = ops->write_io(port, op_bytes, _regs.eax, ctxt);
+ rc = ops->write_io(port, op_bytes, _regs._eax, ctxt);
}
else
{
break;
case 0xf5: /* cmc */
- _regs.eflags ^= EFLG_CF;
+ _regs._eflags ^= EFLG_CF;
break;
case 0xf6 ... 0xf7: /* Grp3 */
dst.val = ~dst.val;
break;
case 3: /* neg */
- emulate_1op("neg", dst, _regs.eflags);
+ emulate_1op("neg", dst, _regs._eflags);
break;
case 4: /* mul */
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
{
case 1:
- dst.val = (uint8_t)_regs.eax;
+ dst.val = _regs.al;
dst.val *= src.val;
if ( (uint8_t)dst.val != (uint16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
dst.bytes = 2;
break;
case 2:
- dst.val = (uint16_t)_regs.eax;
+ dst.val = _regs.ax;
dst.val *= src.val;
if ( (uint16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- *(uint16_t *)&_regs.edx = dst.val >> 16;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
+ _regs.dx = dst.val >> 16;
break;
#ifdef __x86_64__
case 4:
u[0] = src.val;
u[1] = _regs.r(ax);
if ( mul_dbl(u) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
_regs.r(dx) = u[1];
dst.val = u[0];
break;
break;
case 5: /* imul */
imul:
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
{
case 1:
- dst.val = (int8_t)src.val * (int8_t)_regs.eax;
+ dst.val = (int8_t)src.val * (int8_t)_regs.al;
if ( (int8_t)dst.val != (int16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
ASSERT(b > 0x6b);
dst.bytes = 2;
break;
case 2:
dst.val = ((uint32_t)(int16_t)src.val *
- (uint32_t)(int16_t)_regs.eax);
+ (uint32_t)(int16_t)_regs.ax);
if ( (int16_t)dst.val != (int32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
if ( b > 0x6b )
- *(uint16_t *)&_regs.edx = dst.val >> 16;
+ _regs.dx = dst.val >> 16;
break;
#ifdef __x86_64__
case 4:
u[0] = src.val;
u[1] = _regs.r(ax);
if ( imul_dbl(u) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
if ( b > 0x6b )
_regs.r(dx) = u[1];
dst.val = u[0];
switch ( src.bytes )
{
case 1:
- u[0] = (uint16_t)_regs.eax;
+ u[0] = _regs.ax;
u[1] = 0;
v = (uint8_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
EXC_DE);
dst.val = (uint8_t)u[0];
- ((uint8_t *)&_regs.eax)[1] = u[1];
+ _regs.ah = u[1];
break;
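/*
 * For illustration: the byte-sized div divides AX by the operand,
 * placing the quotient in AL and the remainder in AH; the width check
 * above raises #DE when the quotient does not fit in 8 bits.
 */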
case 2:
- u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax;
+ u[0] = (_regs._edx << 16) | _regs.ax;
u[1] = 0;
v = (uint16_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
EXC_DE);
dst.val = (uint16_t)u[0];
- *(uint16_t *)&_regs.edx = u[1];
+ _regs.dx = u[1];
break;
#ifdef __x86_64__
case 4:
switch ( src.bytes )
{
case 1:
- u[0] = (int16_t)_regs.eax;
+ u[0] = (int16_t)_regs.ax;
u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
v = (int8_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
EXC_DE);
dst.val = (int8_t)u[0];
- ((int8_t *)&_regs.eax)[1] = u[1];
+ _regs.ah = u[1];
break;
case 2:
- u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax);
+ u[0] = (int32_t)((_regs._edx << 16) | _regs.ax);
u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
v = (int16_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
EXC_DE);
dst.val = (int16_t)u[0];
- *(int16_t *)&_regs.edx = u[1];
+ _regs.dx = u[1];
break;
#ifdef __x86_64__
case 4:
break;
case 0xf8: /* clc */
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
break;
case 0xf9: /* stc */
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
break;
case 0xfa: /* cli */
generate_exception_if(!mode_iopl(), EXC_GP, 0);
- _regs.eflags &= ~EFLG_IF;
+ _regs._eflags &= ~EFLG_IF;
break;
case 0xfb: /* sti */
generate_exception_if(!mode_iopl(), EXC_GP, 0);
- if ( !(_regs.eflags & EFLG_IF) )
+ if ( !(_regs._eflags & EFLG_IF) )
{
- _regs.eflags |= EFLG_IF;
+ _regs._eflags |= EFLG_IF;
ctxt->retire.sti = true;
}
break;
case 0xfc: /* cld */
- _regs.eflags &= ~EFLG_DF;
+ _regs._eflags &= ~EFLG_DF;
break;
case 0xfd: /* std */
- _regs.eflags |= EFLG_DF;
+ _regs._eflags |= EFLG_DF;
break;
case 0xfe: /* Grp4 */
switch ( modrm_reg & 7 )
{
case 0: /* inc */
- emulate_1op("inc", dst, _regs.eflags);
+ emulate_1op("inc", dst, _regs._eflags);
break;
case 1: /* dec */
- emulate_1op("dec", dst, _regs.eflags);
+ emulate_1op("dec", dst, _regs._eflags);
break;
case 2: /* call (near) */
dst.val = _regs.r(ip);
goto done;
break;
case 4: /* verr / verw */
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
&sreg, ctxt, ops) )
{
if ( sreg.attr.fields.s &&
((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
: ((sreg.attr.fields.type & 0xa) != 0x8)) )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
generate_exception_if(lock_prefix || vex.pfx || !mode_ring0(),
EXC_UD);
- _regs.eflags &= ~EFLG_AC;
+ _regs._eflags &= ~EFLG_AC;
if ( modrm == 0xcb )
- _regs.eflags |= EFLG_AC;
+ _regs._eflags |= EFLG_AC;
goto no_writeback;
#ifdef __XEN__
generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
EXC_UD);
/* Neither HLE nor RTM can be active when we get here. */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
goto no_writeback;
case 0xdf: /* invlpga */
unsigned int eax = 1, ebx = 0, dummy = 0;
unsigned long zero = 0;
- base = ad_bytes == 8 ? _regs.eax :
- ad_bytes == 4 ? (uint32_t)_regs.eax : (uint16_t)_regs.eax;
+ base = ad_bytes == 8 ? _regs.r(ax) :
+ ad_bytes == 4 ? _regs._eax : _regs.ax;
limit = 0;
if ( vcpu_has_clflush() &&
ops->cpuid(&eax, &ebx, &dummy, &dummy, ctxt) == X86EMUL_OKAY )
case X86EMUL_OPC(0x0f, 0x02): /* lar */
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
ctxt, ops) )
{
case 0x09: /* available 32/64-bit TSS */
case 0x0b: /* busy 32/64-bit TSS */
case 0x0c: /* 32/64-bit call gate */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
}
}
else
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
rc = X86EMUL_OKAY;
break;
}
- if ( _regs.eflags & EFLG_ZF )
+ if ( _regs._eflags & EFLG_ZF )
dst.val = ((sreg.attr.bytes & 0xff) << 8) |
((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
0xf0000) |
case X86EMUL_OPC(0x0f, 0x03): /* lsl */
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
ctxt, ops) )
{
case 0x02: /* LDT */
case 0x09: /* available 32/64-bit TSS */
case 0x0b: /* busy 32/64-bit TSS */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
}
}
else
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
rc = X86EMUL_OKAY;
break;
}
- if ( _regs.eflags & EFLG_ZF )
+ if ( _regs._eflags & EFLG_ZF )
dst.val = sreg.limit;
else
dst.type = OP_NONE;
cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
_regs.rcx = _regs.rip;
- _regs.r11 = _regs.eflags & ~EFLG_RF;
+ _regs.r11 = _regs._eflags & ~EFLG_RF;
if ( (rc = ops->read_msr(mode_64bit() ? MSR_LSTAR : MSR_CSTAR,
&msr_content, ctxt)) != 0 )
if ( (rc = ops->read_msr(MSR_FMASK, &msr_content, ctxt)) != 0 )
goto done;
- _regs.eflags &= ~(msr_content | EFLG_RF);
+ _regs._eflags &= ~(msr_content | EFLG_RF);
}
else
#endif
{
cs.attr.bytes = 0xc9b; /* G+DB+P+S+Code */
- _regs.ecx = (uint32_t)_regs.eip;
- _regs.eip = (uint32_t)msr_content;
- _regs.eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ _regs.r(cx) = _regs._eip;
+ _regs._eip = msr_content;
+ _regs._eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
fail_if(ops->write_segment == NULL);
* mitigation is to use a task gate for handling #DB (or to not
* enable EFER.SCE to start with).
*/
- singlestep = _regs.eflags & EFLG_TF;
+ singlestep = _regs._eflags & EFLG_TF;
break;
}
goto done;
break;
- case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */ {
- uint64_t val = ((uint64_t)_regs.edx << 32) | (uint32_t)_regs.eax;
+ case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->write_msr == NULL);
- if ( (rc = ops->write_msr((uint32_t)_regs.ecx, val, ctxt)) != 0 )
+ if ( (rc = ops->write_msr(_regs._ecx,
+ ((uint64_t)_regs.r(dx) << 32) | _regs._eax,
+ ctxt)) != 0 )
goto done;
break;
- }
case X86EMUL_OPC(0x0f, 0x31): rdtsc: /* rdtsc */ {
unsigned long cr4;
uint64_t val;
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->read_msr == NULL);
- if ( (rc = ops->read_msr((uint32_t)_regs.ecx, &val, ctxt)) != 0 )
+ if ( (rc = ops->read_msr(_regs._ecx, &val, ctxt)) != 0 )
goto done;
_regs.r(dx) = val >> 32;
_regs.r(ax) = (uint32_t)val;
case X86EMUL_OPC(0x0f, 0x40) ... X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */
vcpu_must_have(cmov);
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
dst.val = src.val;
break;
if ( lm < 0 )
goto cannot_emulate;
- _regs.eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ _regs._eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
cs.sel = msr_content & ~3; /* SELECTOR_RPL_MASK */
cs.base = 0; /* flat segment */
(rc = ops->write_segment(x86_seg_ss, &sreg, ctxt)) != 0 )
goto done;
- _regs.eip = op_bytes == 8 ? _regs.edx : (uint32_t)_regs.edx;
- _regs.esp = op_bytes == 8 ? _regs.ecx : (uint32_t)_regs.ecx;
+ _regs.r(ip) = op_bytes == 8 ? _regs.r(dx) : _regs._edx;
+ _regs.r(sp) = op_bytes == 8 ? _regs.r(cx) : _regs._ecx;
break;
}
}
case X86EMUL_OPC(0x0f, 0x80) ... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
jmp_rel((int32_t)src.val);
break;
case X86EMUL_OPC(0x0f, 0x90) ... X86EMUL_OPC(0x0f, 0x9f): /* setcc */
- dst.val = test_cc(b, _regs.eflags);
+ dst.val = test_cc(b, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xa2): /* cpuid */ {
- unsigned int eax = _regs.eax, ebx = _regs.ebx;
- unsigned int ecx = _regs.ecx, edx = _regs.edx;
+ unsigned int eax = _regs._eax, ebx = _regs._ebx;
+ unsigned int ecx = _regs._ecx, edx = _regs._edx;
+
fail_if(ops->cpuid == NULL);
rc = ops->cpuid(&eax, &ebx, &ecx, &edx, ctxt);
generate_exception_if(rc == X86EMUL_EXCEPTION,
}
case X86EMUL_OPC(0x0f, 0xa3): bt: /* bt */
- emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("bt", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
generate_exception_if(lock_prefix, EXC_UD);
if ( b & 1 )
- shift = _regs.ecx;
+ shift = _regs.cl;
else
{
shift = src.val;
((dst.orig_val << shift) |
((src.val >> (width - shift)) & ((1ull << shift) - 1))));
dst.val = truncate_word(dst.val, dst.bytes);
- _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 )
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 )
- _regs.eflags |= EFLG_OF;
- _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
- _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0;
- _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0;
+ _regs._eflags |= EFLG_OF;
+ _regs._eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
+ _regs._eflags |= (dst.val == 0) ? EFLG_ZF : 0;
+ _regs._eflags |= even_parity(dst.val) ? EFLG_PF : 0;
break;
}
case X86EMUL_OPC(0x0f, 0xab): bts: /* bts */
- emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("bts", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xae): case X86EMUL_OPC_66(0x0f, 0xae): /* Grp15 */
}
case X86EMUL_OPC(0x0f, 0xaf): /* imul */
- emulate_2op_SrcV_srcmem("imul", src, dst, _regs.eflags);
+ emulate_2op_SrcV_srcmem("imul", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xb0): case X86EMUL_OPC(0x0f, 0xb1): /* cmpxchg */
src.orig_val = src.val;
src.val = _regs.r(ax);
/* cmp: %%eax - dst ==> dst and src swapped for macro invocation */
- emulate_2op_SrcV("cmp", dst, src, _regs.eflags);
- if ( _regs.eflags & EFLG_ZF )
+ emulate_2op_SrcV("cmp", dst, src, _regs._eflags);
+ if ( _regs._eflags & EFLG_ZF )
{
/* Success: write back to memory. */
dst.val = src.orig_val;
goto les;
case X86EMUL_OPC(0x0f, 0xb3): btr: /* btr */
- emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("btr", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xb6): /* movzx rm8,r{16,32,64} */
break;
case X86EMUL_OPC(0x0f, 0xbb): btc: /* btc */
- emulate_2op_SrcV_nobyte("btc", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("btc", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xbc): /* bsf or tzcnt */
asm ( "bsf %2,%0" ASM_FLAG_OUT(, "; setz %1")
: "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf)
: "rm" (src.val) );
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() )
{
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
if ( zf )
{
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
dst.val = op_bytes * 8;
}
else if ( !dst.val )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
else if ( zf )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.type = OP_NONE;
}
break;
asm ( "bsr %2,%0" ASM_FLAG_OUT(, "; setz %1")
: "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf)
: "rm" (src.val) );
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() )
{
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
if ( zf )
{
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
dst.val = op_bytes * 8;
}
else
{
dst.val = op_bytes * 8 - 1 - dst.val;
if ( !dst.val )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
}
else if ( zf )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.type = OP_NONE;
}
break;
/* Get expected value. */
if ( !(rex_prefix & REX_W) )
{
- aux->u32[0] = _regs.eax;
- aux->u32[1] = _regs.edx;
+ aux->u32[0] = _regs._eax;
+ aux->u32[1] = _regs._edx;
}
else
{
/* Expected != actual: store actual to rDX:rAX and clear ZF. */
_regs.r(ax) = !(rex_prefix & REX_W) ? old->u32[0] : old->u64[0];
_regs.r(dx) = !(rex_prefix & REX_W) ? old->u32[1] : old->u64[1];
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
}
else
{
*/
if ( !(rex_prefix & REX_W) )
{
- aux->u32[0] = _regs.ebx;
- aux->u32[1] = _regs.ecx;
+ aux->u32[0] = _regs._ebx;
+ aux->u32[1] = _regs._ecx;
}
else
{
if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, aux,
op_bytes, ctxt)) != X86EMUL_OKAY )
goto done;
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
break;
}
/* Zero the upper 32 bits of %rip if not in 64-bit mode. */
if ( !mode_64bit() )
- _regs.eip = (uint32_t)_regs.eip;
+ _regs.r(ip) = _regs._eip;
/* Should a singlestep #DB be raised? */
if ( rc == X86EMUL_OKAY )
rc = X86EMUL_OKAY;
}
- ctxt->regs->eflags &= ~EFLG_RF;
+ ctxt->regs->_eflags &= ~EFLG_RF;
done:
_put_fpu();