/* Are we here for running vcpu context tasklets, or for idling? */
if ( unlikely(tasklet_work_to_do(cpu)) )
+ {
do_tasklet();
+ /* Livepatch work is always kicked off via a tasklet. */
+ check_for_livepatch_work();
+ }
/*
 * Test softirqs twice --- first to see if we should even try scrubbing
 * and then, after it is done, whether softirqs became pending
 * while we were scrubbing.
 */
else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
          !softirq_pending(cpu) )
    do_idle();
do_softirq();
- /*
- * We MUST be last (or before dsb, wfi). Otherwise after we get the
- * softirq we would execute dsb,wfi (and sleep) and not patch.
- */
- check_for_livepatch_work();
}
}
/* Are we here for running vcpu context tasklets, or for idling? */
if ( unlikely(tasklet_work_to_do(cpu)) )
+ {
do_tasklet();
+ /* Livepatch work is always kicked off via a tasklet. */
+ check_for_livepatch_work();
+ }
/*
 * Test softirqs twice --- first to see if we should even try scrubbing
 * and then, after it is done, whether softirqs became pending
 * while we were scrubbing.
 */
else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
          !softirq_pending(cpu) )
    pm_idle();
do_softirq();
- /*
- * We MUST be last (or before pm_idle). Otherwise after we get the
- * softirq we would execute pm_idle (and sleep) and not patch.
- */
- check_for_livepatch_work();
}
}
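
For context, after both idle-loop hunks the two architectures end up with the
same shape: the livepatch check rides the tasklet branch instead of trailing
do_softirq(). A condensed sketch of the resulting x86 loop, assuming the
surrounding code matches upstream idle_loop() in xen/arch/x86/domain.c (the
Arm loop is the same shape with do_idle() in place of pm_idle()):

    static void idle_loop(void)
    {
        unsigned int cpu = smp_processor_id();

        for ( ; ; )
        {
            if ( cpu_is_offline(cpu) )
                play_dead();

            /* Are we here for running vcpu context tasklets, or for idling? */
            if ( unlikely(tasklet_work_to_do(cpu)) )
            {
                do_tasklet();
                /* Livepatch work is always kicked off via a tasklet. */
                check_for_livepatch_work();
            }
            else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
                      !softirq_pending(cpu) )
                pm_idle();

            do_softirq();
        }
    }

Because the livepatch side now always schedules the per-CPU tasklet,
tasklet_work_to_do() is guaranteed to be true on any CPU with pending
livepatch work, which is why the old "We MUST be last (or before pm_idle)"
ordering constraint can be deleted along with its comment.
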
#include <xen/spinlock.h>
#include <xen/string.h>
#include <xen/symbols.h>
+#include <xen/tasklet.h>
#include <xen/version.h>
#include <xen/virtual_region.h>
#include <xen/vmap.h>
* Having a per-cpu variable lessens the load.
*/
static DEFINE_PER_CPU(bool_t, work_to_do);
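+/*
+ * Per-CPU tasklet used to kick a CPU into the tasklet branch of its idle
+ * loop, where check_for_livepatch_work() now runs.
+ */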
+static DEFINE_PER_CPU(struct tasklet, livepatch_tasklet);
static int get_name(const struct xen_livepatch_name *name, char *n)
{
smp_wmb();
livepatch_work.do_work = 1;
- this_cpu(work_to_do) = 1;
+ tasklet_schedule(&this_cpu(livepatch_tasklet));
put_cpu_maps();
return 0;
}
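
The tasklet primitives used in this patch are the standard Xen API; a
condensed view, with the signatures as declared in xen/include/xen/tasklet.h
(bodies elided):

    void tasklet_init(struct tasklet *t, void (*func)(void *), void *data);
    void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
    void tasklet_schedule(struct tasklet *t); /* schedules on the current CPU */
    void tasklet_kill(struct tasklet *t);

Since the tasklet being kicked in the hypercall path above is the current
CPU's own, plain tasklet_schedule() (which schedules on smp_processor_id())
is the more direct spelling, which is why it is used there.
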
-static void reschedule_fn(void *unused)
+static void tasklet_fn(void *unused)
{
this_cpu(work_to_do) = 1;
- raise_softirq(SCHEDULE_SOFTIRQ);
}
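
tasklet_fn() only raises the per-CPU flag; all of the heavy lifting stays in
check_for_livepatch_work(), which the idle loops invoke right after
do_tasklet(). A condensed sketch of the consumer side, based on upstream
xen/common/livepatch.c with the rendezvous and apply/revert logic elided:

    void check_for_livepatch_work(void)
    {
        unsigned int cpu = smp_processor_id();

        /* Fast path: nothing pending on this CPU. */
        if ( !per_cpu(work_to_do, cpu) )
            return;

        /*
         * Slow path (unchanged by this patch): join the rendezvous via
         * livepatch_work.semaphore, have the master CPU perform the
         * apply/revert action, then clear work_to_do.
         */
    }
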
static int livepatch_spin(atomic_t *counter, s_time_t timeout,
if ( atomic_inc_and_test(&livepatch_work.semaphore) )
{
struct payload *p;
- unsigned int cpus;
+ unsigned int cpus, i;
bool action_done = false;
p = livepatch_work.data;
{
- dprintk(XENLOG_DEBUG, LIVEPATCH "%s: CPU%u - IPIing the other %u CPUs\n",
-         p->name, cpu, cpus);
+ dprintk(XENLOG_DEBUG, LIVEPATCH "%s: CPU%u - scheduling a tasklet on the other %u CPUs\n",
+         p->name, cpu, cpus);
- smp_call_function(reschedule_fn, NULL, 0);
+ for_each_online_cpu ( i )
+ if ( i != cpu )
+ tasklet_schedule_on_cpu(&per_cpu(livepatch_tasklet, i), i);
}
timeout = livepatch_work.timeout + NOW();
spin_unlock(&payload_lock);
}
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if ( action == CPU_UP_PREPARE )
+ tasklet_init(&per_cpu(livepatch_tasklet, cpu), tasklet_fn, NULL);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
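
One possible hardening, not part of this patch: tear the tasklet down again
when a CPU is cancelled or dies, so a stale tasklet cannot linger on the
per-CPU lists. A hypothetical variant of cpu_callback(), using the standard
CPU_UP_CANCELED/CPU_DEAD notifier actions and tasklet_kill():

    static int cpu_callback(
        struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;

        switch ( action )
        {
        case CPU_UP_PREPARE:
            tasklet_init(&per_cpu(livepatch_tasklet, cpu), tasklet_fn, NULL);
            break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
            tasklet_kill(&per_cpu(livepatch_tasklet, cpu));
            break;
        }

        return NOTIFY_DONE;
    }

Whether this is strictly needed depends on how the tasklet subsystem migrates
pending work off a dying CPU; it is shown here as a sketch, not a requirement.
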
+
static int __init livepatch_init(void)
{
+ unsigned int cpu;
+
+ for_each_online_cpu ( cpu )
+ {
+ void *hcpu = (void *)(long)cpu;
+
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ }
+
+ register_cpu_notifier(&cpu_nfb);
+
register_keyhandler('x', livepatch_printall, "print livepatch info", 1);
arch_livepatch_init();
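
For completeness: livepatch_init() runs as an ordinary initcall, i.e. after
the boot CPUs are already online, so the for_each_online_cpu() loop above
covers every CPU present at boot while the registered notifier covers any CPU
hotplugged later. The wiring in upstream is simply:

    __initcall(livepatch_init);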