From: Sergey Fedorov
Date: Fri, 15 Jul 2016 17:58:43 +0000 (+0300)
Subject: tcg: Prepare safe tb_jmp_cache lookup out of tb_lock
X-Git-Tag: qemu-xen-4.9.0-rc1~200^2~53
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=89a16b1e4294e3664667a151c2f70c84dfac6fd9;p=qemu-xen.git

tcg: Prepare safe tb_jmp_cache lookup out of tb_lock

Ensure atomicity of CPU's 'tb_jmp_cache' access for future translation
block lookup out of 'tb_lock'.

Note that this patch does *not* make CPU's TLB invalidation safe if it
is done from some other thread while the CPU is in its execution loop.

Signed-off-by: Alex Bennée
Signed-off-by: Sergey Fedorov
Signed-off-by: Sergey Fedorov
Reviewed-by: Alex Bennée
Message-Id: <20160715175852.30749-4-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini
---

diff --git a/cpu-exec.c b/cpu-exec.c
index cf511f1b9a..32b58edb31 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -315,7 +315,7 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
 
 found:
     /* we add the TB in the virtual pc hash table */
-    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+    atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
     return tb;
 }
 
@@ -333,7 +333,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
     tb_lock();
-    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
diff --git a/translate-all.c b/translate-all.c
index 0dd6466e07..77ae59d7e9 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -851,7 +851,11 @@ void tb_flush(CPUState *cpu)
     tcg_ctx.tb_ctx.nb_tbs = 0;
 
     CPU_FOREACH(cpu) {
-        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+        int i;
+
+        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
+            atomic_set(&cpu->tb_jmp_cache[i], NULL);
+        }
         cpu->tb_flushed = true;
     }
 
@@ -1010,8 +1014,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
-        if (cpu->tb_jmp_cache[h] == tb) {
-            cpu->tb_jmp_cache[h] = NULL;
+        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
+            atomic_set(&cpu->tb_jmp_cache[h], NULL);
         }
     }
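
The pattern the patch establishes can be illustrated outside QEMU: every slot of the per-CPU jump cache is only ever read and written through atomic accessors, so a later change can drop tb_lock around the fast-path lookup. The following is a minimal standalone sketch of that idea using C11 atomics; TB_JMP_CACHE_SIZE, the hash function, and the TranslationBlock fields below are simplified stand-ins, and atomic_store/atomic_load model QEMU's atomic_set()/atomic_rcu_read() macros only approximately (atomic_rcu_read() relies on dependency ordering, shown here as an acquire load for simplicity).

/* Standalone sketch of a lock-free jump cache, NOT QEMU code.
 * Names and sizes are illustrative assumptions. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TB_JMP_CACHE_SIZE 4096              /* power of two, hypothetical */

typedef struct TranslationBlock {
    uintptr_t pc;                           /* simplified: guest PC only */
} TranslationBlock;

/* One slot per hash bucket; every access goes through C11 atomics. */
static _Atomic(TranslationBlock *) tb_jmp_cache[TB_JMP_CACHE_SIZE];

static inline size_t tb_jmp_cache_hash(uintptr_t pc)
{
    return pc & (TB_JMP_CACHE_SIZE - 1);
}

/* Writer: publish a TB (plays the role of atomic_set() in the patch). */
static void tb_jmp_cache_set(TranslationBlock *tb)
{
    atomic_store_explicit(&tb_jmp_cache[tb_jmp_cache_hash(tb->pc)],
                          tb, memory_order_release);
}

/* Reader: look up a TB without a lock (plays the role of atomic_rcu_read()). */
static TranslationBlock *tb_jmp_cache_lookup(uintptr_t pc)
{
    TranslationBlock *tb =
        atomic_load_explicit(&tb_jmp_cache[tb_jmp_cache_hash(pc)],
                             memory_order_acquire);
    return (tb && tb->pc == pc) ? tb : NULL;
}

int main(void)
{
    static TranslationBlock tb = { .pc = 0x1234 };

    tb_jmp_cache_set(&tb);
    printf("hit:  %p\n", (void *)tb_jmp_cache_lookup(0x1234));
    printf("miss: %p\n", (void *)tb_jmp_cache_lookup(0x5678));
    return 0;
}

The same reasoning explains why tb_flush() in the patch replaces memset() with a per-slot atomic_set() loop: clearing the cache byte-wise would let a concurrent reader observe a torn pointer, whereas storing NULL slot by slot keeps every individual load/store atomic.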