if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
goto fail3;
- softirq_tasklet_init(
- &v->arch.hvm.assert_evtchn_irq_tasklet,
- (void(*)(unsigned long))hvm_assert_evtchn_irq,
- (unsigned long)v);
+ softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
+ (void (*)(void *))hvm_assert_evtchn_irq, v);
v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
vcpu_unpause(target);
}
-static void vlapic_init_sipi_action(unsigned long _vcpu)
+static void vlapic_init_sipi_action(void *data)
{
- struct vcpu *origin = (struct vcpu *)_vcpu;
+ struct vcpu *origin = data;
uint32_t icr = vcpu_vlapic(origin)->init_sipi.icr;
uint32_t dest = vcpu_vlapic(origin)->init_sipi.dest;
uint32_t short_hand = icr & APIC_SHORT_MASK;
spin_lock_init(&vlapic->esr_lock);
- tasklet_init(&vlapic->init_sipi.tasklet,
- vlapic_init_sipi_action,
- (unsigned long)v);
+ tasklet_init(&vlapic->init_sipi.tasklet, vlapic_init_sipi_action, v);
if ( v->vcpu_id == 0 )
register_mmio_handler(v->domain, &vlapic_mmio_ops);
#ifdef CONFIG_PV
-void pv_l1tf_tasklet(unsigned long data)
+void pv_l1tf_tasklet(void *data)
{
- struct domain *d = (void *)data;
+ struct domain *d = data;
domain_pause(d);
paging_lock(d);
spin_lock_init(&v->virq_lock);
- tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
+ tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
grant_table_init_vcpu(v);
static DEFINE_PER_CPU(struct migrate_info *, continue_info);
-static void continue_hypercall_tasklet_handler(unsigned long _info)
+static void continue_hypercall_tasklet_handler(void *data)
{
- struct migrate_info *info = (struct migrate_info *)_info;
+ struct migrate_info *info = data;
struct vcpu *v = info->vcpu;
long res = -EINVAL;
info->vcpu = curr;
info->nest = 0;
- tasklet_kill(
- &curr->continue_hypercall_tasklet);
- tasklet_init(
- &curr->continue_hypercall_tasklet,
- continue_hypercall_tasklet_handler,
- (unsigned long)info);
+ tasklet_kill(&curr->continue_hypercall_tasklet);
+ tasklet_init(&curr->continue_hypercall_tasklet,
+ continue_hypercall_tasklet_handler, info);
get_knownalive_domain(curr->domain);
vcpu_pause_nosync(curr);
#undef KEYHANDLER
};
-static void keypress_action(unsigned long unused)
+static void keypress_action(void *unused)
{
handle_keypress(keypress_key, NULL);
}
-static DECLARE_TASKLET(keypress_tasklet, keypress_action, 0);
+static DECLARE_TASKLET(keypress_tasklet, keypress_action, NULL);
void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
{
watchdog_enable();
}
-static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, 0);
+static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, NULL);
-static void dump_hwdom_action(unsigned long arg)
+static void dump_hwdom_action(void *data)
{
- struct vcpu *v = (void *)arg;
+ struct vcpu *v = data;
for ( ; ; )
{
break;
if ( softirq_pending(smp_processor_id()) )
{
- dump_hwdom_tasklet.data = (unsigned long)v;
+ dump_hwdom_tasklet.data = v;
tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
break;
}
if ( alt_key_handling && softirq_pending(smp_processor_id()) )
{
tasklet_kill(&dump_hwdom_tasklet);
- tasklet_init(&dump_hwdom_tasklet, dump_hwdom_action,
- (unsigned long)v);
+ tasklet_init(&dump_hwdom_tasklet, dump_hwdom_action, v);
tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
return;
}
maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}
-static void run_all_nonirq_keyhandlers(unsigned long unused)
+static void run_all_nonirq_keyhandlers(void *unused)
{
/* Fire all the non-IRQ-context diagnostic keyhandlers */
struct keyhandler *h;
}
static DECLARE_TASKLET(run_all_keyhandlers_tasklet,
- run_all_nonirq_keyhandlers, 0);
+ run_all_nonirq_keyhandlers, NULL);
static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs)
{
return ret;
}
-static void stopmachine_action(unsigned long cpu)
+static void stopmachine_action(void *data)
{
+ unsigned int cpu = (unsigned long)data;
enum stopmachine_state state = STOPMACHINE_START;
BUG_ON(cpu != smp_processor_id());
if ( action == CPU_UP_PREPARE )
tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
- stopmachine_action, cpu);
+ stopmachine_action, hcpu);
return NOTIFY_DONE;
}
spin_unlock_irqrestore(&tasklet_lock, flags);
}
-void tasklet_init(
- struct tasklet *t, void (*func)(unsigned long), unsigned long data)
+void tasklet_init(struct tasklet *t, void (*func)(void *), void *data)
{
memset(t, 0, sizeof(*t));
INIT_LIST_HEAD(&t->list);
t->data = data;
}
-void softirq_tasklet_init(
- struct tasklet *t, void (*func)(unsigned long), unsigned long data)
+void softirq_tasklet_init(struct tasklet *t, void (*func)(void *), void *data)
{
tasklet_init(t, func, data);
t->is_softirq = 1;
* Notification is performed in qtasklet to avoid deadlocks with contexts
* which __trace_var() may be called from (e.g., scheduler critical regions).
*/
-static void trace_notify_dom0(unsigned long unused)
+static void trace_notify_dom0(void *unused)
{
send_global_virq(VIRQ_TBUF);
}
static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet,
- trace_notify_dom0, 0);
+ trace_notify_dom0, NULL);
/**
* __trace_var - Enters a trace tuple into the trace buffer for the current CPU.
__serial_rx(c, regs);
}
-static void notify_dom0_con_ring(unsigned long unused)
+static void notify_dom0_con_ring(void *unused)
{
send_global_virq(VIRQ_CON_RING);
}
static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet,
- notify_dom0_con_ring, 0);
+ notify_dom0_con_ring, NULL);
#ifdef CONFIG_X86
static inline void xen_console_write_debug_port(const char *buf, size_t len)
return 0;
}
-static void guest_iommu_process_command(unsigned long _d)
+static void guest_iommu_process_command(void *data)
{
unsigned long opcode, tail, head, entries_per_page, cmd_mfn;
cmd_entry_t *cmd, *cmd_base;
- struct domain *d = (struct domain *)_d;
+ struct domain *d = data;
struct guest_iommu *iommu;
iommu = domain_iommu(d);
iommu->domain = d;
hd->arch.g_iommu = iommu;
- tasklet_init(&iommu->cmd_buffer_tasklet,
- guest_iommu_process_command, (unsigned long)d);
+ tasklet_init(&iommu->cmd_buffer_tasklet, guest_iommu_process_command, d);
spin_lock_init(&iommu->lock);
static int __initdata nr_amd_iommus;
static bool __initdata pci_init;
-static void do_amd_iommu_irq(unsigned long data);
-static DECLARE_SOFTIRQ_TASKLET(amd_iommu_irq_tasklet, do_amd_iommu_irq, 0);
+static void do_amd_iommu_irq(void *unused);
+static DECLARE_SOFTIRQ_TASKLET(amd_iommu_irq_tasklet, do_amd_iommu_irq, NULL);
unsigned int __read_mostly ivrs_bdf_entries;
u8 __read_mostly ivhd_type;
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void do_amd_iommu_irq(unsigned long data)
+static void do_amd_iommu_irq(void *unused)
{
struct amd_iommu *iommu;
return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
}
-static void iommu_free_pagetables(unsigned long unused)
+static void iommu_free_pagetables(void *unused)
{
do {
struct page_info *pg;
iommu_hwdom_passthrough ? "Passthrough" :
iommu_hwdom_strict ? "Strict" : "Relaxed");
printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
- tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0);
+ tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, NULL);
}
return rc;
}
}
-static void do_iommu_page_fault(unsigned long data)
+static void do_iommu_page_fault(void *unused)
{
struct acpi_drhd_unit *drhd;
}
}
- softirq_tasklet_init(&vtd_fault_tasklet, do_iommu_page_fault, 0);
+ softirq_tasklet_init(&vtd_fault_tasklet, do_iommu_page_fault, NULL);
if ( !iommu_qinval && iommu_intremap )
{
return pv_l1tf_check_pte(d, 4, l4e.l4);
}
-void pv_l1tf_tasklet(unsigned long data);
+void pv_l1tf_tasklet(void *data);
static inline void pv_l1tf_domain_init(struct domain *d)
{
: opt_pv_l1tf_domu;
#ifdef CONFIG_SHADOW_PAGING
- tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet,
- pv_l1tf_tasklet, (unsigned long)d);
+ tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet, pv_l1tf_tasklet, d);
#endif
}
bool_t is_softirq;
bool_t is_running;
bool_t is_dead;
- void (*func)(unsigned long);
- unsigned long data;
+ void (*func)(void *);
+ void *data;
};
#define _DECLARE_TASKLET(name, func, data, softirq) \
void tasklet_schedule(struct tasklet *t);
void do_tasklet(void);
void tasklet_kill(struct tasklet *t);
-void tasklet_init(
- struct tasklet *t, void (*func)(unsigned long), unsigned long data);
-void softirq_tasklet_init(
- struct tasklet *t, void (*func)(unsigned long), unsigned long data);
+void tasklet_init(struct tasklet *t, void (*func)(void *), void *data);
+void softirq_tasklet_init(struct tasklet *t, void (*func)(void *), void *data);
void tasklet_subsys_init(void);
#endif /* __XEN_TASKLET_H__ */