The PERFC_INCR() macro uses current->processor, but current is not valid
during early boot. This causes the following crash if e.g. rdmsr_safe()
has to recover from a #GP fault:
(XEN) Early fatal page fault at e008:ffff82d0803b1a39 (cr2=0000000000000004, ec=0000)
(XEN) ----[ Xen-4.14-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 0
(XEN) RIP: e008:[<ffff82d0803b1a39>] x86_64/entry.S#handle_exception_saved+0x64/0xb8
...
(XEN) Xen call trace:
(XEN)    [<ffff82d0803b1a39>] R x86_64/entry.S#handle_exception_saved+0x64/0xb8
(XEN)    [<ffff82d0806394fe>] F __start_xen+0x2cd/0x2980
(XEN)    [<ffff82d0802000ec>] F __high_start+0x4c/0x4e
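
The cr2 value of 4 is consistent with a NULL current: processor is the
second int in struct vcpu, so current->processor reads offset 4 through a
NULL pointer. A sketch of the pieces involved (paraphrasing
xen/include/asm-x86/current.h and xen/include/xen/sched.h; layouts
abbreviated, not verbatim):

    struct vcpu {
        int vcpu_id;                    /* offset 0 */
        int processor;                  /* offset 4 -- matches cr2 above */
        /* ... */
    };

    struct cpu_info {
        /* ... */
        struct vcpu *current_vcpu;      /* not yet set up during early boot */
        unsigned long per_cpu_offset;
        /* ... */
    };

    #define current (get_cpu_info()->current_vcpu)

    /* What PERFC_INCR() effectively did to pick its per-cpu slot: */
    unsigned int cpu = current->processor;  /* faults: NULL->processor */
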
Furthermore, the PERFC_INCR() macro is wildly inefficient. There has been a
single caller for many releases now, so inline it and delete the macro
completely.
There is no need to reference current at all. What is actually needed is
the per_cpu_offset, which can be obtained directly from the top-of-stack
block. This simplifies the counter handling to 3 instructions, with no
spilling to the stack at all.
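
In C terms, the new sequence amounts to the following (a sketch only;
vector stands for the exception vector loaded into %eax in the diff below,
and get_cpu_info() for the cpu_info block addressed via %r14):

    /* lea  per_cpu__perfcounters(%rip), %rcx
     *   -- take the link-time address of the per-cpu counter array */
    uint32_t *ctrs = (uint32_t *)&per_cpu__perfcounters;

    /* add  STACK_CPUINFO_FIELD(per_cpu_offset)(%r14), %rcx
     *   -- relocate it to this CPU's copy of the per-cpu area */
    ctrs = (uint32_t *)((unsigned long)ctrs + get_cpu_info()->per_cpu_offset);

    /* incl ASM_PERFC_exceptions * 4(%rcx, %rax, 4)
     *   -- bump the 32-bit counter for this exception vector */
    ctrs[ASM_PERFC_exceptions + vector]++;
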
The same breakage from above is now handled properly:
(XEN) traps.c:1591: GPF (0000): ffff82d0806394fe [__start_xen+0x2cd/0x2980] -> ffff82d0803b3bfb
Reported-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Julien Grall <jgrall@amazon.com>
master commit: 615bfe42c6d183a0e54a0525ef82b58580d01619
master date: 2020-04-16 09:48:38 +0100
OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+ OFFSET(CPUINFO_per_cpu_offset, struct cpu_info, per_cpu_offset);
OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
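
(The OFFSET() line added above only exposes an existing struct cpu_info
field to assembly; asm-offsets.c turns each such line into a CPUINFO_*
constant, which STACK_CPUINFO_FIELD() then rebases onto the stack-end
pointer kept in %r14. Paraphrased from xen/include/asm-x86/asm_defns.h,
not verbatim:)

    /* %r14 points at the last byte of the stack; struct cpu_info sits at
     * the top, so a field lives at %r14 + 1 - sizeof + offsetof. */
    #define STACK_CPUINFO_FIELD(field) (1 - CPUINFO_sizeof + CPUINFO_##field)
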
1: movq %rsp,%rdi
movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
- PERFC_INCR(exceptions, %rax, %rbx)
+#ifdef CONFIG_PERF_COUNTERS
+ lea per_cpu__perfcounters(%rip), %rcx
+ add STACK_CPUINFO_FIELD(per_cpu_offset)(%r14), %rcx
+ incl ASM_PERFC_exceptions * 4(%rcx, %rax, 4)
+#endif
mov (%rdx, %rax, 8), %rdx
INDIRECT_CALL %rdx
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
#endif
-#ifdef CONFIG_PERF_COUNTERS
-#define PERFC_INCR(_name,_idx,_cur) \
- pushq _cur; \
- movslq VCPU_processor(_cur),_cur; \
- pushq %rdx; \
- leaq __per_cpu_offset(%rip),%rdx; \
- movq (%rdx,_cur,8),_cur; \
- leaq per_cpu__perfcounters(%rip),%rdx; \
- addq %rdx,_cur; \
- popq %rdx; \
- incl ASM_PERFC_##_name*4(_cur,_idx,4); \
- popq _cur
-#else
-#define PERFC_INCR(_name,_idx,_cur)
-#endif
-
/* Work around AMD erratum #88 */
#define safe_swapgs \
"mfence; swapgs;"