}
__initcall(hvm_register_CPU_save_and_restore);
+static void cf_check hvm_assert_evtchn_irq_tasklet(void *v)
+{
+    hvm_assert_evtchn_irq(v);
+}
+
int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;
        goto fail3;
    softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
-                         (void (*)(void *))hvm_assert_evtchn_irq, v);
+                         hvm_assert_evtchn_irq_tasklet, v);
    v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
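The hvm.c hunk above is the one place that needs more than an annotation: hvm_assert_evtchn_irq() takes a struct vcpu *, so the old code cast it to the tasklet's void (*)(void *) type. Calling through a function pointer of an incompatible type is undefined behaviour in C, and under a control-flow-integrity build the indirect call target no longer matches what the annotation scheme expects, so the thin wrapper gives the tasklet an entry point with exactly the type it wants. A minimal standalone sketch of the idea (hypothetical names, not Xen code):

#include <stdio.h>

struct vcpu { int id; };

typedef void (*tasklet_fn_t)(void *);   /* the callback type the API wants */

static void assert_irq(struct vcpu *v)  /* real signature: struct vcpu * */
{
    printf("assert irq for vcpu %d\n", v->id);
}

/*
 * BAD: (tasklet_fn_t)assert_irq - calling through an incompatible
 * function-pointer type is undefined behaviour, and a CFI scheme sees a
 * target whose type/annotation does not match the call site.
 *
 * GOOD: a thin wrapper with the exact type the API expects.
 */
static void assert_irq_tasklet(void *data)
{
    assert_irq(data);   /* void * converts implicitly to struct vcpu * in C */
}

int main(void)
{
    struct vcpu v = { .id = 0 };
    tasklet_fn_t fn = assert_irq_tasklet;   /* types match: no cast, no UB */

    fn(&v);
    return 0;
}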
    vcpu_unpause(target);
}
-static void vlapic_init_sipi_action(void *data)
+static void cf_check vlapic_init_sipi_action(void *data)
{
    struct vcpu *origin = data;
    uint32_t icr = vcpu_vlapic(origin)->init_sipi.icr;
    return pv_l1tf_check_pte(d, 4, l4e.l4);
}
-void pv_l1tf_tasklet(void *data);
+void cf_check pv_l1tf_tasklet(void *data);
static inline void pv_l1tf_domain_init(struct domain *d)
{
#ifdef CONFIG_PV
-void pv_l1tf_tasklet(void *data)
+void cf_check pv_l1tf_tasklet(void *data)
{
    struct domain *d = data;
static DEFINE_PER_CPU(struct migrate_info *, continue_info);
-static void continue_hypercall_tasklet_handler(void *data)
+static void cf_check continue_hypercall_tasklet_handler(void *data)
{
    struct migrate_info *info = data;
    struct vcpu *v = info->vcpu;
#undef KEYHANDLER
};
-static void keypress_action(void *unused)
+static void cf_check keypress_action(void *unused)
{
    handle_keypress(keypress_key, NULL);
}
static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, NULL);
-static void dump_hwdom_action(void *data)
+static void cf_check dump_hwdom_action(void *data)
{
    struct vcpu *v = data;
           maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}
-static void run_all_nonirq_keyhandlers(void *unused)
+static void cf_check run_all_nonirq_keyhandlers(void *unused)
{
    /* Fire all the non-IRQ-context diagnostic keyhandlers */
    struct keyhandler *h;
    return 0;
}
-static void tasklet_fn(void *unused)
+static void cf_check tasklet_fn(void *unused)
{
    this_cpu(work_to_do) = 1;
}
    return ret;
}
-static void stopmachine_action(void *data)
+static void cf_check stopmachine_action(void *data)
{
    unsigned int cpu = (unsigned long)data;
    enum stopmachine_state state = STOPMACHINE_START;
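A side note on stopmachine_action()'s first line: the CPU number travels through the void * callback cookie by value, cast to and from unsigned long, rather than by pointing at real storage; the other handlers in this patch use the same cookie to carry a genuine vcpu or domain pointer instead. A standalone sketch of the integer-in-pointer idiom (illustrative names, not Xen code):

#include <stdio.h>

static void action(void *data)
{
    unsigned int cpu = (unsigned long)data;   /* unpack: pointer -> integer */

    printf("running on behalf of cpu %u\n", cpu);
}

int main(void)
{
    unsigned int cpu = 3;

    action((void *)(unsigned long)cpu);   /* pack: integer -> pointer */
    return 0;
}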
 * Notification is performed in a tasklet to avoid deadlocks with contexts
 * which __trace_var() may be called from (e.g., scheduler critical regions).
 */
-static void trace_notify_dom0(void *unused)
+static void cf_check trace_notify_dom0(void *unused)
{
    send_global_virq(VIRQ_TBUF);
}
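The comment kept above trace_notify_dom0() is the design rationale behind most of the handlers in this patch: __trace_var() can be reached from scheduler critical regions, so the VIRQ notification is deferred to a tasklet and sent later from softirq context rather than while those locks are held. A standalone sketch of that deferral shape (simplified, hypothetical names; Xen's real mechanism is the tasklet API visible in these hunks):

#include <stdbool.h>
#include <stdio.h>

static volatile bool notify_pending;   /* set in the critical region */

static void send_virq(void)            /* stands in for send_global_virq() */
{
    printf("VIRQ_TBUF delivered\n");
}

static void trace_var(void)            /* may run under scheduler locks */
{
    /* ... write the trace record ... */
    notify_pending = true;             /* cheap flag set: defer the work */
}

static void tasklet_body(void)         /* runs later, outside critical regions */
{
    if ( notify_pending )
    {
        notify_pending = false;
        send_virq();                   /* safe: no tracer locks held here */
    }
}

int main(void)
{
    trace_var();      /* critical path: only sets the flag */
    tasklet_body();   /* softirq context: performs the notification */
    return 0;
}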
    __serial_rx(c, regs);
}
-static void notify_dom0_con_ring(void *unused)
+static void cf_check notify_dom0_con_ring(void *unused)
{
    send_global_virq(VIRQ_CON_RING);
}
    return 0;
}
-static void guest_iommu_process_command(void *data)
+static void cf_check guest_iommu_process_command(void *data)
{
    unsigned long opcode, tail, head, cmd_mfn;
    cmd_entry_t *cmd;
static int __initdata nr_amd_iommus;
static bool __initdata pci_init;
-static void do_amd_iommu_irq(void *data);
+static void cf_check do_amd_iommu_irq(void *data);
static DECLARE_SOFTIRQ_TASKLET(amd_iommu_irq_tasklet, do_amd_iommu_irq, NULL);
unsigned int __read_mostly amd_iommu_acpi_info;
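The AMD IOMMU hunk shows why forward declarations need the annotation too: DECLARE_SOFTIRQ_TASKLET() takes do_amd_iommu_irq's address at file scope, before the definition appears, and the declaration the initializer sees must agree with the annotated definition further down. A standalone sketch of the shape (cf_check stubbed out as an empty macro here; in Xen it expands to a function attribute when the toolchain supports the CFI scheme, and to nothing otherwise):

#include <stdio.h>

#define cf_check   /* empty in this sketch; an attribute in CFI builds */

struct tasklet {
    void (*func)(void *);
    void *data;
};

static void cf_check irq_handler(void *data);              /* declaration */
static struct tasklet irq_tasklet = { irq_handler, NULL }; /* takes its address */

static void cf_check irq_handler(void *data)               /* definition */
{
    (void)data;
    puts("handler invoked through the tasklet pointer");
}

int main(void)
{
    /* Indirect call through the stored pointer, as the softirq would make. */
    irq_tasklet.func(irq_tasklet.data);
    return 0;
}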
    spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void do_amd_iommu_irq(void *unused)
+static void cf_check do_amd_iommu_irq(void *unused)
{
    struct amd_iommu *iommu;
    }
}
-static void do_iommu_page_fault(void *unused)
+static void cf_check do_iommu_page_fault(void *unused)
{
    struct acpi_drhd_unit *drhd;