In icount mode, instructions that access I/O memory spaces in the middle
of a translation block trigger TB recompilation. After recompilation,
such an instruction becomes the last one in the TB and is allowed to access
I/O memory spaces.
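
For context, the recompilation is forced by a check in the softmmu I/O slow
path; a simplified sketch of the idea follows (this is not the exact cputlb.c
code, and do_device_read is a hypothetical stand-in for the real dispatch):

    /* Sketch of the icount I/O check in the softmmu slow path.
     * In real QEMU this lives in io_readx/io_writex; do_device_read is a
     * hypothetical placeholder for the actual memory-region dispatch. */
    static uint64_t io_access_sketch(CPUState *cpu, hwaddr addr, uintptr_t retaddr)
    {
        if (!cpu->can_do_io) {
            /* Not the last insn of the TB: retranslate so that this
             * instruction terminates the block, then execute it again. */
            cpu_io_recompile(cpu, retaddr);   /* does not return */
        }
        /* can_do_io is set: the device access may really be performed. */
        return do_device_read(addr);
    }
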
When the code includes an instruction like the i386 'xchg eax, 0xffffd080',
which accesses the APIC, QEMU goes into an infinite recompilation loop.
This instruction performs two memory accesses: one read and one write.
After the first access, the APIC calls cpu_report_tpr_access, which restores
the CPU state to get the current eip. But cpu_restore_state_from_tb
resets the cpu->can_do_io flag, which makes the second memory access invalid.
Therefore the second memory access causes a recompilation of the block,
and these operations then repeat over and over.
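
A toy model of the pre-patch state flip (plain C, not QEMU code; the names
only mirror QEMU's) shows why the loop never terminates:

    #include <stdbool.h>
    #include <stdio.h>

    static bool can_do_io = true;   /* set when the insn is last in the TB */

    /* Models cpu_report_tpr_access -> cpu_restore_state ->
     * cpu_restore_state_from_tb, which used to clear can_do_io. */
    static void report_tpr_access(void)
    {
        can_do_io = false;
    }

    int main(void)
    {
        /* Read half of the xchg hits the APIC; allowed, can_do_io is set. */
        report_tpr_access();
        /* The write half now sees can_do_io == 0, so QEMU would call
         * cpu_io_recompile() and re-execute the same TB: infinite loop. */
        printf("can_do_io after TPR report: %d\n", can_do_io);
        return 0;
    }
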
This patch moves the resetting of the cpu->can_do_io flag from
cpu_restore_state_from_tb to the cpu_loop_exit* functions.
It also adds a parameter to cpu_restore_state which controls restoring of
icount. There is no need to restore icount when we only query the CPU state
without breaking the TB; restoring it in such cases leads to an
incorrect flow of virtual time.
In most cases the new parameter is true (icount should be recalculated).
But there are two cases, in i386 and openrisc, where the CPU state is only
queried without the need to break the TB. This patch fixes both of
these cases.
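
To illustrate the intended use of the new flag (signatures as introduced by
this patch; the surrounding helpers are hypothetical):

    /* A TCG helper about to raise an exception: the TB is abandoned,
     * so icount must be rewound to the faulting instruction. */
    void helper_fault_example(CPUState *cs, uintptr_t retaddr)
    {
        cpu_restore_state(cs, retaddr, true);
        cpu_loop_exit(cs);
    }

    /* A caller that only needs an up-to-date PC and then keeps executing
     * the same TB (e.g. the i386 TPR-access report): icount must not be
     * touched, or virtual time would drift. */
    void query_pc_example(CPUState *cs)
    {
        cpu_restore_state(cs, cs->mem_io_pc, false);
    }
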
Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
Message-Id: <20180409091320.12504.35329.stgit@pasha-VirtualBox>
[rth: Make can_do_io setting unconditional; move from cpu_exec;
make cpu_loop_exit_{noexc,restore} call cpu_loop_exit.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
- /* XXX: restore cpu registers saved in host registers */
-
cpu->exception_index = -1;
- siglongjmp(cpu->jmp_env, 1);
+ cpu_loop_exit(cpu);
}
#if defined(CONFIG_SOFTMMU)
void cpu_loop_exit(CPUState *cpu)
{
+ /* Undo the setting in cpu_tb_exec. */
+ cpu->can_do_io = 1;
siglongjmp(cpu->jmp_env, 1);
}
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
if (pc) {
- cpu_restore_state(cpu, pc);
+ cpu_restore_state(cpu, pc, true);
}
- siglongjmp(cpu->jmp_env, 1);
+ cpu_loop_exit(cpu);
}
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
- cpu->can_do_io = 1;
tb_lock_reset();
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
/* The cpu state corresponding to 'searched_pc' is restored.
* Called with tb_lock held.
+ * When reset_icount is true, the current TB will be interrupted and
+ * icount should be recalculated.
*/
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
- uintptr_t searched_pc)
+ uintptr_t searched_pc, bool reset_icount)
{
target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
return -1;
found:
- if (tb->cflags & CF_USE_ICOUNT) {
+ if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
assert(use_icount);
- /* Reset the cycle counter to the start of the block. */
- cpu->icount_decr.u16.low += num_insns;
- /* Clear the IO flag. */
- cpu->can_do_io = 0;
+ /* Reset the cycle counter to the start of the block
+ and shift it by the number of instructions actually executed. */
+ cpu->icount_decr.u16.low += num_insns - i;
}
- cpu->icount_decr.u16.low -= i;
restore_state_to_opc(env, tb, data);
#ifdef CONFIG_PROFILER
return 0;
}
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
TranslationBlock *tb;
bool r = false;
tb_lock();
tb = tb_find_pc(host_pc);
if (tb) {
- cpu_restore_state_from_tb(cpu, tb, host_pc);
+ cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
tb_phys_invalidate(tb, -1);
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
+ cpu_restore_state_from_tb(cpu, current_tb,
+ cpu->mem_io_pc, true);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state_from_tb(cpu, current_tb, pc);
+ cpu_restore_state_from_tb(cpu, current_tb, pc, true);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
tb = tb_find_pc(cpu->mem_io_pc);
if (tb) {
/* We can use retranslation to find the PC. */
- cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
+ cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
tb_phys_invalidate(tb, -1);
} else {
/* The exception probably happened in a helper. The CPU state should
cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
(void *)retaddr);
}
- cpu_restore_state_from_tb(cpu, tb, retaddr);
+ cpu_restore_state_from_tb(cpu, tb, retaddr, true);
/* On MIPS and SH, delay slot instructions can only be restarted if
they were already the first instruction in the TB. If this is not
}
/* Now we have a real cpu fault. */
- cpu_restore_state(cpu, pc);
+ cpu_restore_state(cpu, pc, true);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);
static void QEMU_NORETURN block_thread_and_exit(ITCStorageCell *c)
{
c->blocked_threads |= 1ULL << current_cpu->cpu_index;
- cpu_restore_state(current_cpu, current_cpu->mem_io_pc);
current_cpu->halted = 1;
current_cpu->exception_index = EXCP_HLT;
- cpu_loop_exit(current_cpu);
+ cpu_loop_exit_restore(current_cpu, current_cpu->mem_io_pc);
}
/* ITC Bypass View */
* cpu_restore_state:
* @cpu: the vCPU state is to be restored to
* @searched_pc: the host PC the fault occurred at
+ * @will_exit: true if the TB executed will be interrupted after some
+ cpu adjustments. Required for maintaining the correct
+ icount values
* @return: true if state was restored, false otherwise
*
* Attempt to restore the state for a fault occurring in translated
* code. If the searched_pc is not in translated code no state is
* restored and the function returns false.
*/
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
cs->exception_index = excp;
env->error_code = error;
if (retaddr) {
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
/* Floating-point exceptions (our only users) point to the next PC. */
env->pc += 4;
}
uint64_t pc;
uint32_t insn;
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
pc = env->pc;
insn = cpu_ldl_code(env, pc);
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
- cpu_restore_state(cs, retaddr);
-
env->trap_arg0 = addr;
env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0;
cs->exception_index = EXCP_MCHK;
env->error_code = 0;
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
/* try to fill the TLB and return an exception if error. If retaddr is
ARMCPU *cpu = ARM_CPU(cs);
/* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
ARMMMUFaultInfo fi = {};
/* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
ARMMMUFaultInfo fi = {};
/* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- if (cpu_restore_state(cs, retaddr)) {
- /* Evaluate flags after retranslation. */
+ if (cpu_restore_state(cs, retaddr, true)) {
+ /* Evaluate flags after retranslation. */
helper_top_evaluate_flags(env);
}
}
cpu_interrupt(cs, CPU_INTERRUPT_TPR);
} else if (tcg_enabled()) {
- cpu_restore_state(cs, cs->mem_io_pc);
+ cpu_restore_state(cs, cs->mem_io_pc, false);
apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
}
{
CPUState *cs = CPU(x86_env_get_cpu(env));
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
PRIx64 ", " TARGET_FMT_lx ")!\n",
CPUState *cs = CPU(m68k_env_get_cpu(env));
/* Recover PC and CC_OP for the beginning of the insn. */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), true);
/* flags have been modified by gen_flush_flags() */
env->cc_op = CC_OP_FLAGS;
CPUState *cs = CPU(m68k_env_get_cpu(env));
/* Recover PC and CC_OP for the beginning of the insn. */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), true);
/* flags have been modified by gen_flush_flags() */
env->cc_op = CC_OP_FLAGS;
/* Stash the exception type. */
env->sregs[2] = ex;
/* Stash the address where the exception occurred. */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), true);
env->sregs[5] = env->pc;
/* Jump to the exception handling routine. */
env->pc = env->sregs[1];
break;
case TO_SPR(0, 16): /* NPC */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), true);
/* ??? Mirror or1ksim in not trashing delayed branch state
when "jumping" to the current instruction. */
if (env->pc != rb) {
case TO_SPR(8, 0): /* PMR */
env->pmr = rb;
if (env->pmr & PMR_DME || env->pmr & PMR_SME) {
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), true);
env->pc += 4;
cs->halted = 1;
raise_exception(cpu, EXCP_HALTED);
return env->evbar;
case TO_SPR(0, 16): /* NPC (equals PC) */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), false);
return env->pc;
case TO_SPR(0, 17): /* SR */
return cpu_get_sr(env);
case TO_SPR(0, 18): /* PPC */
- cpu_restore_state(cs, GETPC());
+ cpu_restore_state(cs, GETPC(), false);
return env->ppc;
case TO_SPR(0, 32): /* EPCR */
{
CPUState *cs = CPU(tricore_env_get_cpu(env));
/* in case we come from a helper-call we need to restore the PC */
- cpu_restore_state(cs, pc);
+ cpu_restore_state(cs, pc, true);
/* Tin is loaded into d[15] */
env->gpr_d[15] = tin;
if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
!xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
- cpu_restore_state(CPU(cpu), retaddr);
+ cpu_restore_state(CPU(cpu), retaddr, true);
HELPER(exception_cause_vaddr)(env,
env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
}
paddr & TARGET_PAGE_MASK,
access, mmu_idx, page_size);
} else {
- cpu_restore_state(cs, retaddr);
+ cpu_restore_state(cs, retaddr, true);
HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
}
}