xen/sched: move per-cpu variable cpupool to struct sched_resource
author     Juergen Gross <jgross@suse.com>
           Wed, 2 Oct 2019 07:27:35 +0000 (09:27 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 4 Oct 2019 10:59:18 +0000 (12:59 +0200)
Having a pointer to struct cpupool in struct sched_resource instead
of a per-cpu variable is enough.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
xen/common/cpupool.c
xen/common/sched_credit.c
xen/common/sched_rt.c
xen/common/schedule.c
xen/include/xen/sched-if.h
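For orientation, here is a minimal sketch of the access-pattern change this
commit makes. The struct bodies, NR_CPUS value and the backing array are
illustrative stand-ins, not Xen's real definitions; Xen's per_cpu() machinery
is modeled by a plain array indexed by CPU number.

/* Illustrative stand-ins only; not Xen's real declarations. */
struct scheduler { int dummy; };
struct cpupool   { int pool_id; };

/* After this commit the pool pointer lives next to the scheduler. */
struct sched_resource {
    struct scheduler *scheduler;
    struct cpupool   *cpupool;  /* replaces DEFINE_PER_CPU(struct cpupool *, cpupool) */
};

#define NR_CPUS 8
static struct sched_resource sched_res_of[NR_CPUS]; /* models per_cpu(sched_res, cpu) */

static inline struct sched_resource *get_sched_res(unsigned int cpu)
{
    return &sched_res_of[cpu];
}

/*
 * Old call sites:  struct cpupool *c = per_cpu(cpupool, cpu);
 * New call sites:  struct cpupool *c = get_sched_res(cpu)->cpupool;
 */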

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 441a26f16c5044a63c73d3e9bfd533891c27d245..60a85f50e1f773bcf7521d750ddd16dc84cb46b2 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -34,8 +34,6 @@ static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
 
-DEFINE_PER_CPU(struct cpupool *, cpupool);
-
 static void free_cpupool_struct(struct cpupool *c)
 {
     if ( c )
@@ -504,7 +502,7 @@ static int cpupool_cpu_add(unsigned int cpu)
      * (or unplugging would have failed) and that is the default behavior
      * anyway.
      */
-    per_cpu(cpupool, cpu) = NULL;
+    get_sched_res(cpu)->cpupool = NULL;
     ret = cpupool_assign_cpu_locked(cpupool0, cpu);
 
     spin_unlock(&cpupool_lock);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 86603adcb68d244bc493e955296a07fba31b665a..31fdcd6a2fd64d95bcfa7084702f52d6de6b0240 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1681,7 +1681,7 @@ static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
     struct csched_unit *snext, bool *stolen)
 {
-    struct cpupool *c = per_cpu(cpupool, cpu);
+    struct cpupool *c = get_sched_res(cpu)->cpupool;
     struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index d21c416cae26ffd303c2e48b5c5cac596dd9c700..6e93e50acbac608b30fc1a341deeaf18311200de 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -774,7 +774,7 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     if ( prv->repl_timer.cpu == cpu )
     {
-        struct cpupool *c = per_cpu(cpupool, cpu);
+        struct cpupool *c = get_sched_res(cpu)->cpupool;
         unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
 
         /*
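In the rt_deinit_pdata() hunk above, the replenishment timer is moved off the
outgoing CPU by cycling to the next online CPU of the same pool. Below is a
toy model of cpumask_cycle()'s wrap-around semantics, simplified to a 32-bit
mask (an assumption for brevity; Xen's real cpumask_t covers all CPUs):

#include <stdint.h>

#define NBITS 32  /* stand-in for nr_cpu_ids */

/* Next CPU set in mask strictly after n, wrapping around to the lowest
 * set CPU; returns NBITS when the mask is empty (like nr_cpu_ids). */
static unsigned int cycle_next(unsigned int n, uint32_t mask)
{
    for ( unsigned int i = 1; i <= NBITS; i++ )
    {
        unsigned int c = (n + i) % NBITS;
        if ( mask & (1u << c) )
            return c;
    }
    return NBITS;
}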
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 6da96695c2c0b16bb78fa6b782679bff440cec5a..ab9b573a92ca8a2abc355cfaf0ed73c9fd0fcc1a 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1120,7 +1120,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     cpumask_t online_affinity;
     int ret = 0;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return ret;
 
@@ -1189,7 +1189,7 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
     struct vcpu *v;
     struct cpupool *c;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return 0;
 
@@ -2558,8 +2558,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
-    struct cpupool *old_pool = per_cpu(cpupool, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
+    struct cpupool *old_pool = sd->cpupool;
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
@@ -2641,7 +2641,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     sched_free_udata(old_ops, vpriv_old);
     sched_free_pdata(old_ops, ppriv_old, cpu);
 
-    per_cpu(cpupool, cpu) = c;
+    get_sched_res(cpu)->cpupool = c;
     /* When a cpu is added to a pool, trigger it to go pick up some work */
     if ( c != NULL )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
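The two schedule_cpu_switch() hunks above fit a simple ordering: the old pool
is read through the sched_resource before the scheduler is swapped, and the
new pool is stored afterwards, followed by a kick. A condensed sketch with
hypothetical stand-ins (lock and private-data handling omitted; the
SCHEDULE_SOFTIRQ value is a placeholder):

struct cpupool;
struct sched_resource { struct cpupool *cpupool; };

extern struct sched_resource *get_sched_res(unsigned int cpu);
extern void cpu_raise_softirq(unsigned int cpu, unsigned int nr);
#define SCHEDULE_SOFTIRQ 1  /* placeholder value */

static void switch_pool_sketch(unsigned int cpu, struct cpupool *c)
{
    struct sched_resource *sd = get_sched_res(cpu);
    struct cpupool *old_pool = sd->cpupool; /* read before it is overwritten */

    (void)old_pool; /* the real code also validates the transition against it */

    /* ... scheduler ops, locks and private data are swapped here ... */

    sd->cpupool = c;
    if ( c != NULL )
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ); /* go pick up some work */
}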
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 01821b3e5b520ecc965778ddfde3561df935c429..e675061290994e7f8b71053d4a056824f905f29e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -37,6 +37,7 @@ extern const cpumask_t *sched_res_mask;
  * one it wants (This may be the one right in front of it).*/
 struct sched_resource {
     struct scheduler   *scheduler;
+    struct cpupool     *cpupool;
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
@@ -50,7 +51,6 @@ struct sched_resource {
     const cpumask_t    *cpus;           /* cpus covered by this struct     */
 };
 
-DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
 
 static inline struct sched_resource *get_sched_res(unsigned int cpu)
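With the new member in place, a CPU's pool is reached through the resource
accessor shown above. A hypothetical convenience wrapper (not part of the
patch) illustrating the idiom:

/* Hypothetical helper, not in the patch: look up a CPU's pool via its
 * scheduling resource. NULL means the CPU currently belongs to no pool
 * (see the cpu_disable_scheduler() hunks above). */
static inline struct cpupool *cpu_to_cpupool(unsigned int cpu)
{
    return get_sched_res(cpu)->cpupool;
}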