struct x86_emulate_ctxt *ctxt)
{
/* Reads expected for all user and system segments. */
- assert(is_x86_user_segment(seg) || is_x86_system_segment(seg));
+ if ( is_x86_user_segment(seg) )
+ assert(ctxt->addr_size == 64 || !(offset >> 32));
+ else if ( seg == x86_seg_tr )
+ /*
+ * The TSS is special in that accesses below the segment base are
+ * possible, as the Interrupt Redirection Bitmap starts 32 bytes
+ * before the I/O Bitmap, regardless of the value of the latter.
+ */
+ assert((long)offset < 0 ? (long)offset > -32 : !(offset >> 17));
+ else
+ assert(is_x86_system_segment(seg) &&
+ (ctxt->lma ? offset <= 0x10007 : !(offset >> 16)));
return data_read(ctxt, seg, "read", p_data, bytes);
}
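
Note, for reference, the ranges these asserts encode: user-segment offsets
must be 32-bit clean unless the address size is 64; TSS reads may reach from
32 bytes below the base (the Interrupt Redirection Bitmap) up to !(offset >>
17), enough for a 16-bit I/O Bitmap base plus the 8k bitmap itself; and other
system segments stop at the 16-bit limit, or at 0x10007 in long mode, where
the second qword of a 16-byte descriptor whose first qword ends at the limit
is still readable. A standalone restatement of the TSS range, purely
illustrative (the helper below is not part of the patch):

static bool tss_offset_ok(unsigned long offset)
{
    if ( (long)offset < 0 )
        /* Interrupt Redirection Bitmap: up to 32 bytes below the base. */
        return (long)offset > -32;
    /* 16-bit I/O Bitmap base (<= 0xffff) plus 8k of bitmap fits in 17 bits. */
    return !(offset >> 17);
}
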
{
assert(seg == x86_seg_cs);
+ /* Minimal segment limit checking, until a full implementation is in place. */
+ if ( ctxt->addr_size < 64 && (offset >> 32) )
+ {
+ x86_emul_hw_exception(13, 0, ctxt);
+ return X86EMUL_EXCEPTION;
+ }
+
/*
* Zero-length instruction fetches are made at the destination of jumps,
* to perform segmentation checks. No data needs returning.
struct x86_emulate_ctxt *ctxt)
{
assert(dst_seg == x86_seg_es);
+ assert(ctxt->addr_size == 64 || !(dst_offset >> 32));
return _fuzz_rep_read(ctxt, "rep_ins", reps);
}
{
assert(is_x86_user_segment(src_seg));
assert(dst_seg == x86_seg_es);
+ assert(ctxt->addr_size == 64 || !((src_offset | dst_offset) >> 32));
return _fuzz_rep_read(ctxt, "rep_movs", reps);
}
struct x86_emulate_ctxt *ctxt)
{
assert(is_x86_user_segment(src_seg));
+ assert(ctxt->addr_size == 64 || !(src_offset >> 32));
return _fuzz_rep_write(ctxt, "rep_outs", reps);
}
* for CLZERO.
*/
assert(is_x86_user_segment(seg));
+ assert(ctxt->addr_size == 64 || !(offset >> 32));
return _fuzz_rep_write(ctxt, "rep_stos", reps);
}
{
/* Writes not expected for any system segments. */
assert(is_x86_user_segment(seg));
+ assert(ctxt->addr_size == 64 || !(offset >> 32));
return maybe_fail(ctxt, "write", true);
}
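
Note: the asserts added to the rep_ins/rep_movs/rep_outs/rep_stos and write
hooks all check one invariant: outside 64-bit mode the emulator truncates
effective addresses to the address size, so no offset reaching a hook may
have bits set above bit 31. A sketch of that shared predicate (the helper
name is invented; the hooks open-code the check):

static bool offset_fits(const struct x86_emulate_ctxt *ctxt,
                        unsigned long offset)
{
    /* A 16- or 32-bit address size implies a 32-bit-clean offset. */
    return ctxt->addr_size == 64 || !(offset >> 32);
}
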
* Cmpxchg expected for user segments, and setting accessed/busy bits in
* GDT/LDT entries, but not expected for any IDT or TR accesses.
*/
- assert(is_x86_user_segment(seg) ||
- seg == x86_seg_gdtr || seg == x86_seg_ldtr);
+ if ( is_x86_user_segment(seg) )
+ assert(ctxt->addr_size == 64 || !(offset >> 32));
+ else
+ assert((seg == x86_seg_gdtr || seg == x86_seg_ldtr) && !(offset >> 16));
return maybe_fail(ctxt, "cmpxchg", true);
}
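
Note: the stricter !(offset >> 16) here, compared with the 0x10007 allowance
on the read side, appears deliberate: accessed/busy updates cmpxchg only the
low 8 bytes of a descriptor, and those start within the 16-bit GDT/LDT limit;
e.g. an update to a descriptor at 0xfff8 touches at most offset 0xffff.
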
{
/* invlpg(), unlike all other hooks, may be called with x86_seg_none. */
assert(is_x86_user_segment(seg) || seg == x86_seg_none);
+ assert(ctxt->addr_size == 64 || !(offset >> 32));
return maybe_fail(ctxt, "invlpg", false);
}
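
Note: invlpg is the one hook where x86_seg_none is legitimate, as the comment
says; the offset-width assert is the common one. A trivial restatement,
illustrative only:

static bool invlpg_seg_ok(enum x86_segment seg)
{
    /* Only invlpg may be invoked with x86_seg_none. */
    return is_x86_user_segment(seg) || seg == x86_seg_none;
}
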
/* Clip maximum repetitions so that the index register at most just wraps. */
#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({ \
- unsigned long todo__, ea__ = truncate_word(ea, ad_bytes); \
+ unsigned long todo__, ea__ = truncate_ea(ea); \
if ( !(_regs.eflags & X86_EFLAGS_DF) ) \
- todo__ = truncate_word(-(ea), ad_bytes) / (bytes_per_rep); \
- else if ( truncate_word((ea) + (bytes_per_rep) - 1, ad_bytes) < ea__ )\
+ todo__ = truncate_ea(-ea__) / (bytes_per_rep); \
+ else if ( truncate_ea(ea__ + (bytes_per_rep) - 1) < ea__ ) \
todo__ = 1; \
else \
todo__ = ea__ / (bytes_per_rep) + 1; \
op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L));
else
ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L);
+ ea.mem.off = truncate_ea(ea.mem.off);
}
/* Bit index always truncated to within range. */
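
Worked example for the adjustment above, taking op_bytes == 2: a bit index of
100 advances ea.mem.off by (100 >> 3) & ~1 = 12 bytes and leaves bit
100 & 15 = 4 in the addressed word; an index of -1 takes the negative branch,
stepping op_bytes + (((1 - 1) >> 3) & ~1) = 2 bytes down, i.e. to bit 15 of
the preceding word. The new truncate_ea() call makes the adjusted offset wrap
within a 16- or 32-bit address space instead of escaping it.
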
unsigned long src_val2;
int lb, ub, idx;
generate_exception_if(src.type != OP_MEM, EXC_UD);
- if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
+ if ( (rc = read_ulong(src.mem.seg, truncate_ea(src.mem.off + op_bytes),
&src_val2, op_bytes, ctxt, ops)) )
goto done;
ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
seg = (b & 1) * 3; /* es = 0, ds = 3 */
les:
generate_exception_if(src.type != OP_MEM, EXC_UD);
- if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ if ( (rc = read_ulong(src.mem.seg, truncate_ea(src.mem.off + src.bytes),
&dst.val, 2, ctxt, ops)) != X86EMUL_OKAY )
goto done;
ASSERT(is_x86_user_segment(seg));
case 5: /* jmp (far, absolute indirect) */
generate_exception_if(src.type != OP_MEM, EXC_UD);
- if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
+ if ( (rc = read_ulong(src.mem.seg,
+ truncate_ea(src.mem.off + op_bytes),
&imm2, 2, ctxt, ops)) )
goto done;
imm1 = src.val;
}
if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit,
2, ctxt)) != X86EMUL_OKAY ||
- (rc = ops->write(ea.mem.seg, ea.mem.off + 2, &sreg.base,
- op_bytes, ctxt)) != X86EMUL_OKAY )
+ (rc = ops->write(ea.mem.seg, truncate_ea(ea.mem.off + 2),
+ &sreg.base, op_bytes, ctxt)) != X86EMUL_OKAY )
goto done;
break;
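
For context: the image stored by sgdt/sidt is the 2-byte limit at ea.mem.off
followed by the base at ea.mem.off + 2, so wrapping the second write through
truncate_ea() matters at the very top of a non-64-bit address space: with a
16-bit address size and ea.mem.off == 0xffff, the base must be written at
offset 1, not 0x10001.
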
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->write_segment == NULL);
memset(&sreg, 0, sizeof(sreg));
- if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+ if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
&limit, 2, ctxt, ops)) ||
- (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
+ (rc = read_ulong(ea.mem.seg, truncate_ea(ea.mem.off + 2),
&base, mode_64bit() ? 8 : 4, ctxt, ops)) )
goto done;
generate_exception_if(!is_canonical_address(base), EXC_GP, 0);