head->func = func;
head->next = NULL;
local_irq_save(flags);
- rdp = &__get_cpu_var(rcu_data);
+ rdp = &this_cpu(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
if (unlikely(++rdp->qlen > qhimark)) {
/* Invoke pending RCU callbacks for the local CPU's queue. */
static void rcu_process_callbacks(void)
{
    __rcu_process_callbacks(&rcu_ctrlblk, &this_cpu(rcu_data));
}
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
/* Access CPU @cpu's copy of per-CPU variable @var. */
#define per_cpu(var, cpu) \
    (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
/* Access the local CPU's copy; per-CPU base is cached in TPIDR_EL2. */
#define this_cpu(var) \
    (*RELOC_HIDE(&per_cpu__##var, READ_SYSREG(TPIDR_EL2)))
/* Same pair for an already-taken per-CPU pointer rather than a name. */
#define per_cpu_ptr(var, cpu) \
    (*RELOC_HIDE(var, __per_cpu_offset[cpu]))
#define this_cpu_ptr(var) \
    (*RELOC_HIDE(var, READ_SYSREG(TPIDR_EL2)))
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) \
    (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
/* Access the local CPU's copy; offset is cached in struct cpu_info. */
#define this_cpu(var) \
    (*RELOC_HIDE(&per_cpu__##var, get_cpu_info()->per_cpu_offset))
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
/* Local-CPU variant taking an already-taken per-CPU pointer. */
#define this_cpu_ptr(var) \
    (*RELOC_HIDE(var, get_cpu_info()->per_cpu_offset))
/* NOTE(review): continuation line was lost in extraction; reconstructed
 * from the parallel definition in the other arch's header — verify
 * against the original tree. */
#define per_cpu_ptr(var, cpu) \
    (*RELOC_HIDE(var, __per_cpu_offset[cpu]))
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
    __DEFINE_PER_CPU(type, _##name, .read_mostly)
/* Old this_cpu()/this_cpu_ptr() compatibility aliases deleted: those
 * names are now the primary accessors defined in the arch headers. */
#define get_per_cpu_var(var) (per_cpu__##var)
/* Linux compatibility. */
#define AVC_CACHE_RECLAIM 16

#ifdef CONFIG_XSM_FLASK_AVC_STATS
/* Bump one field of the local CPU's AVC cache statistics. */
#define avc_cache_stats_incr(field)         \
do {                                        \
    this_cpu(avc_cache_stats).field++;      \
} while (0)
#else
/* Stats disabled: compile to nothing while remaining statement-safe. */
#define avc_cache_stats_incr(field) do {} while (0)