... and friends; alloc_direct_apic_vector() and set_direct_apic_vector().

Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
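
For context, a minimal sketch of what the annotation amounts to. The macro
definition below is illustrative only and not part of this patch (the exact
spelling and the CONFIG_XEN_IBT guard are assumptions): with GCC's
-fcf-protection=branch plus -mmanual-endbr, only functions carrying the
cf_check attribute receive an ENDBR landing pad, so every legitimate
indirect-call target must be annotated.

/*
 * Illustrative sketch only; not quoted from this patch.  The config
 * symbol and attribute spelling are assumptions.
 */
#ifdef CONFIG_XEN_IBT
# define cf_check __attribute__((cf_check))
#else
# define cf_check /* No CFI support: the annotation compiles away. */
#endif

/*
 * A hypothetical handler, annotated the same way as the real ones
 * below.  It is only ever reached via a function pointer, so the
 * toolchain must emit a landing pad at its entry point.
 */
void cf_check example_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq(); /* Same acknowledgement as the handlers below. */
}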
return apic_tmict || !timeout;
}
-void apic_timer_interrupt(struct cpu_user_regs * regs)
+void cf_check apic_timer_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(apic_timer);
/*
* Spurious interrupts should _never_ happen with our APIC/SMP architecture.
*/
-void spurious_interrupt(struct cpu_user_regs *regs)
+void cf_check spurious_interrupt(struct cpu_user_regs *regs)
{
/*
* Check if this is a vectored interrupt (most likely, as this is probably
* This interrupt should never happen with our APIC/SMP architecture
*/
-void error_interrupt(struct cpu_user_regs *regs)
+void cf_check error_interrupt(struct cpu_user_regs *regs)
{
static const char *const esr_fields[] = {
"Send CS error",
* This interrupt handles performance counters interrupt
*/
-void pmu_apic_interrupt(struct cpu_user_regs *regs)
+void cf_check pmu_apic_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
vpmu_do_interrupt(regs);
#define MCE_RING 0x1
static DEFINE_PER_CPU(int, last_state);
-static void intel_thermal_interrupt(struct cpu_user_regs *regs)
+static void cf_check intel_thermal_interrupt(struct cpu_user_regs *regs)
{
uint64_t msr_content;
unsigned int cpu = smp_processor_id();
clear_cmci();
}
-static void cmci_interrupt(struct cpu_user_regs *regs)
+static void cf_check cmci_interrupt(struct cpu_user_regs *regs)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
}
}
-static void xen_evtchn_upcall(struct cpu_user_regs *regs)
+static void cf_check xen_evtchn_upcall(struct cpu_user_regs *regs)
{
struct vcpu_info *vcpu_info = this_cpu(vcpu_info);
unsigned long pending;
}
}
-static void hpet_interrupt_handler(int irq, void *data,
- struct cpu_user_regs *regs)
+static void cf_check hpet_interrupt_handler(
+ int irq, void *data, struct cpu_user_regs *regs)
{
struct hpet_event_channel *ch = data;
};
/* Handle VT-d posted-interrupt when VCPU is blocked. */
-static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
+static void cf_check pi_wakeup_interrupt(struct cpu_user_regs *regs)
{
struct vmx_vcpu *vmx, *tmp;
spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock;
}
/* Handle VT-d posted-interrupt when VCPU is running. */
-static void pi_notification_interrupt(struct cpu_user_regs *regs)
+static void cf_check pi_notification_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
this_cpu(irq_count)++;
#define platform_legacy_irq(irq) ((irq) < 16)
-void event_check_interrupt(struct cpu_user_regs *regs);
-void invalidate_interrupt(struct cpu_user_regs *regs);
-void call_function_interrupt(struct cpu_user_regs *regs);
-void apic_timer_interrupt(struct cpu_user_regs *regs);
-void error_interrupt(struct cpu_user_regs *regs);
-void pmu_apic_interrupt(struct cpu_user_regs *regs);
-void spurious_interrupt(struct cpu_user_regs *regs);
-void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
+void cf_check event_check_interrupt(struct cpu_user_regs *regs);
+void cf_check invalidate_interrupt(struct cpu_user_regs *regs);
+void cf_check call_function_interrupt(struct cpu_user_regs *regs);
+void cf_check apic_timer_interrupt(struct cpu_user_regs *regs);
+void cf_check error_interrupt(struct cpu_user_regs *regs);
+void cf_check pmu_apic_interrupt(struct cpu_user_regs *regs);
+void cf_check spurious_interrupt(struct cpu_user_regs *regs);
+void cf_check irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
uint8_t alloc_hipriority_vector(void);
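
The declarations gain the annotation as well, so every translation unit that
takes a handler's address sees the same annotated type.  A sketch of how such
an address escapes follows; the helper's prototype is inferred from its
callers and is an assumption, not quoted from the tree.

/* Sketch only; prototype inferred, not quoted. */
void alloc_direct_apic_vector(uint8_t *vector,
                              void (*handler)(struct cpu_user_regs *regs));

static uint8_t example_vector;

static void __init example_setup(void)
{
    /*
     * example_interrupt's address escapes here, making it an
     * indirect-call target and hence in need of cf_check.
     */
    alloc_direct_apic_vector(&example_vector, example_interrupt);
}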
desc->handler->enable(desc);
}
-void irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
+void cf_check irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
{
unsigned vector, me;
static const void *flush_va;
static unsigned int flush_flags;
-void invalidate_interrupt(struct cpu_user_regs *regs)
+void cf_check invalidate_interrupt(struct cpu_user_regs *regs)
{
unsigned int flags = flush_flags;
ack_APIC_irq();
send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
}
-void event_check_interrupt(struct cpu_user_regs *regs)
+void cf_check event_check_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(ipis);
this_cpu(irq_count)++;
}
-void call_function_interrupt(struct cpu_user_regs *regs)
+void cf_check call_function_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(ipis);
}
}
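
The remaining hunks cover handlers registered through request_irq() and the
related setup_irq() path, whose callbacks take the (irq, dev_id, regs)
triple.  The same rule applies, as in this sketch; request_irq()'s prototype
is paraphrased here so the example stands alone.

/* Paraphrased prototype, included only to make the sketch self-contained. */
int request_irq(unsigned int irq, unsigned int irqflags,
                void (*handler)(int irq, void *dev_id,
                                struct cpu_user_regs *regs),
                const char *devname, void *dev_id);

/* Hypothetical IRQ handler following the pattern of the hunks below. */
static void cf_check example_irq_handler(int irq, void *dev_id,
                                         struct cpu_user_regs *regs)
{
    /* Device-specific acknowledgement and work would go here. */
}

static int __init example_irq_setup(unsigned int irq)
{
    /* Passing the handler by address makes it a CFI target. */
    return request_irq(irq, 0, example_irq_handler, "example", NULL);
}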
-static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
+static void cf_check timer_interrupt(
+ int irq, void *dev_id, struct cpu_user_regs *regs)
{
ASSERT(local_irq_is_enabled());
}
}
-static void iommu_interrupt_handler(int irq, void *dev_id,
- struct cpu_user_regs *regs)
+static void cf_check iommu_interrupt_handler(
+ int irq, void *dev_id, struct cpu_user_regs *regs)
{
unsigned long flags;
struct amd_iommu *iommu = dev_id;
__do_iommu_page_fault(drhd->iommu);
}
-static void iommu_page_fault(int irq, void *dev_id,
- struct cpu_user_regs *regs)
+static void cf_check iommu_page_fault(
+ int irq, void *dev_id, struct cpu_user_regs *regs)
{
/*
* Just flag the tasklet as runnable. This is fine, according to VT-d