}
}
-int cf_check generic_get_free_region(
+int mtrr_get_free_region(
unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
<base> The starting (base) address of the region.
if (replace_reg >= 0 && replace_reg < max)
return replace_reg;
for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
+ mtrr_get(i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
return -ENOSPC;
}
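For reference, a hedged sketch of how a caller uses the scan above; the -1 replace_reg and the error handling mirror mtrr_add_page() later in this patch, but the snippet itself is illustrative, not part of the change:

/* Illustrative caller sketch (not part of the patch). */
int reg = mtrr_get_free_region(base, size, -1 /* no replacement */);
if (reg < 0)
    return reg;               /* -ENOSPC: all variable MTRRs in use */
set_mtrr(reg, base, size, type);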
-static void cf_check generic_get_mtrr(
+void mtrr_get(
unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type)
{
uint64_t _mask, _base;
spin_unlock(&set_atomicity_lock);
}
-static void cf_check generic_set_all(void)
+void mtrr_set_all(void)
{
unsigned long mask, count;
unsigned long flags;
}
}
-static void cf_check generic_set_mtrr(
+void mtrr_set(
unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
local_irq_restore(flags);
}
-int cf_check generic_validate_add_page(
+int mtrr_validate_add_page(
unsigned long base, unsigned long size, unsigned int type)
{
unsigned long lbase, last;
}
-static int cf_check generic_have_wrcomb(void)
+bool mtrr_have_wrcomb(void)
{
unsigned long config;
rdmsrl(MSR_MTRRcap, config);
return (config & (1ULL << 10));
}
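The 1ULL << 10 constant is the WC bit of MSR_MTRRcap; a hedged decoding sketch follows (bit positions are per the Intel SDM; the helper name is illustrative and not part of this patch):

/* MSR_MTRRcap (0xFE) layout, per the Intel SDM:
 *   bits 7:0  VCNT - number of variable-range MTRRs
 *   bit  8    FIX  - fixed-range MTRRs supported
 *   bit  10   WC   - write-combining memory type supported
 */
static void decode_mtrrcap(uint64_t cap) /* illustrative helper */
{
    unsigned int vcnt = cap & 0xff;

    printk("MTRRcap: %u variable ranges, fixed %d, WC %d\n",
           vcnt, !!(cap & (1ULL << 8)), !!(cap & (1ULL << 10)));
}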
-
-/* generic structure...
- */
-const struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = true,
- .set_all = generic_set_all,
- .get = generic_get_mtrr,
- .get_free_region = generic_get_free_region,
- .set = generic_set_mtrr,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = generic_have_wrcomb,
-};
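With only the generic implementation left, the indirect calls through this ops table become direct calls, which is why the cf_check (CET-IBT indirect-call target) annotations can be dropped throughout; schematically:

/* Before: indirect call through a function pointer table. */
mtrr_if->set_all();
/* After: direct call; no retpoline/ENDBR cost, and the compiler
 * can see the callee. */
mtrr_set_all();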
u64 __read_mostly size_or_mask;
u64 __read_mostly size_and_mask;
-const struct mtrr_ops *__read_mostly mtrr_if = NULL;
+static bool __ro_after_init mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
return (x <= 6) ? mtrr_strings[x] : "?";
}
-/* Returns non-zero if we have the write-combining memory type */
-static int have_wrcomb(void)
-{
- return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
-}
-
/* This function sets num_var_ranges to the number of variable MTRRs */
static void __init set_num_var_ranges(void)
{
- unsigned long config = 0;
-
- if (use_intel()) {
- rdmsrl(MSR_MTRRcap, config);
- } else if (is_cpu(AMD))
- config = 2;
- else if (is_cpu(CENTAUR))
- config = 8;
+ unsigned long config;
+
+ rdmsrl(MSR_MTRRcap, config);
num_var_ranges = MASK_EXTR(config, MTRRcap_VCNT);
}
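MASK_EXTR() extracts a bit-field from a value using its mask alone; a hedged sketch of the idiom, assuming Xen's usual MASK_EXTR() definition and MTRRcap_VCNT == 0xff (both assumptions about headers not shown in this hunk):

/* Assumed definition: divide by the mask's lowest set bit to
 * shift the masked field down to bit 0. */
#define MASK_EXTR(v, m) (((v) & (m)) / ((m) & -(m)))

/* Worked example with config == 0x508 (VCNT=8, FIX and WC set):
 *   config & 0xff = 0x08;  0xff & -0xff = 0x01 (lowest set bit);
 *   0x08 / 0x01   = 8 variable-range MTRRs.                      */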
if (data->smp_reg == ~0U) /* update all mtrr registers */
/* At the cpu hot-add time this will reinitialize mtrr
* registers on the existing cpus. It is ok. */
- mtrr_if->set_all();
+ mtrr_set_all();
else /* single mtrr register update */
- mtrr_if->set(data->smp_reg, data->smp_base,
- data->smp_size, data->smp_type);
+ mtrr_set(data->smp_reg, data->smp_base,
+ data->smp_size, data->smp_type);
atomic_dec(&data->count);
while(atomic_read(&data->gate))
* of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
* until it hits 0 and proceed. We set the data.gate flag and reset data.count.
* Meanwhile, they are waiting for that flag to be set. Once it's set, each
- * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
- * differently, so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate to
- * be reset.
+ * CPU goes through the transition of updating MTRRs.
+ * When mtrr_set() or mtrr_set_all() is done, each CPU again decrements
+ * data.count and waits for data.gate to be reset.
* When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
* Everyone then enables interrupts and we all continue on.
*
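A minimal sketch of the BP's side of this rendezvous, using the data.count/data.gate names from the comment (the exact primitives, e.g. smp_call_function() and the counter initialisation, are assumptions about code not shown in this hunk):

/* Hedged sketch of the rendezvous driver (BP side). */
atomic_set(&data.count, nr_cpus - 1);
atomic_set(&data.gate, 0);
smp_call_function(ipi_handler, &data, 0);  /* kick the APs */
while (atomic_read(&data.count))
    cpu_relax();              /* all APs have IRQs off, at barrier  */
atomic_set(&data.count, nr_cpus - 1);
atomic_set(&data.gate, 1);    /* open the gate: everyone updates    */
/* ... the BP's own update follows below ... */
while (atomic_read(&data.count))
    cpu_relax();              /* all APs finished their update      */
atomic_set(&data.gate, 0);    /* close the gate; APs re-enable IRQs */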
if (reg == ~0U) /* update all mtrr registers */
/* at boot or resume time, this will reinitialize the mtrrs on
* the bp. It is ok. */
- mtrr_if->set_all();
+ mtrr_set_all();
else /* update the single mtrr register */
- mtrr_if->set(reg,base,size,type);
+ mtrr_set(reg, base, size, type);
/* wait for the others */
while (atomic_read(&data.count))
if (!mtrr_if)
return -ENXIO;
- if ((error = mtrr_if->validate_add_page(base,size,type)))
+ if ((error = mtrr_validate_add_page(base, size, type)))
return error;
if (type >= MTRR_NUM_TYPES) {
}
/* If the type is WC, check that this processor supports it */
- if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
+ if ((type == MTRR_TYPE_WRCOMB) && !mtrr_have_wrcomb()) {
printk(KERN_WARNING
"mtrr: your processor doesn't support write-combining\n");
return -EOPNOTSUPP;
/* Search for existing MTRR */
mutex_lock(&mtrr_mutex);
for (i = 0; i < num_var_ranges; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
+ mtrr_get(i, &lbase, &lsize, &ltype);
if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
continue;
/* At this point we know there is some kind of overlap/enclosure */
goto out;
}
/* Search for an empty MTRR */
- i = mtrr_if->get_free_region(base, size, replace);
+ i = mtrr_get_free_region(base, size, replace);
if (i >= 0) {
set_mtrr(i, base, size, type);
if (likely(replace < 0))
if (reg < 0) {
/* Search for existing MTRR */
for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
+ mtrr_get(i, &lbase, &lsize, &ltype);
if (lbase == base && lsize == size) {
reg = i;
break;
printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
goto out;
}
- mtrr_if->get(reg, &lbase, &lsize, &ltype);
+ mtrr_get(reg, &lbase, &lsize, &ltype);
if (lsize < 1) {
printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
goto out;
void __init mtrr_bp_init(void)
{
if (cpu_has_mtrr) {
- mtrr_if = &generic_mtrr_ops;
+ mtrr_if = true;
size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
}
if (mtrr_if) {
set_num_var_ranges();
init_table();
- if (use_intel())
- get_mtrr_state();
+ get_mtrr_state();
}
}
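The two masks are derived from the machine's physical address width; a worked example of the arithmetic, assuming paddr_bits == 36 and PAGE_SHIFT == 12 (one common physical-address width):

/* Worked example, assuming paddr_bits == 36, PAGE_SHIFT == 12:
 *   1ULL << (36 - 12)         = 0x0000000001000000
 *   (1ULL << 24) - 1          = 0x0000000000ffffff
 *   size_or_mask  = ~that     = 0xffffffffff000000
 *   size_and_mask = ~size_or_mask & 0xfffff00000ULL
 *                             = 0x0000000000f00000
 */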
void mtrr_ap_init(void)
{
- if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps)
+ if (!mtrr_if || hold_mtrr_updates_on_aps)
return;
/*
* Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
void mtrr_aps_sync_begin(void)
{
- if (!use_intel())
- return;
hold_mtrr_updates_on_aps = 1;
}
void mtrr_aps_sync_end(void)
{
- if (!use_intel())
- return;
set_mtrr(~0U, 0, 0, 0);
hold_mtrr_updates_on_aps = 0;
}
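The begin/end pair exists so that a batch of APs coming online (e.g. across S3 resume) is programmed in one rendezvous rather than one at a time; a hedged sketch of the intended calling pattern (the surrounding bring-up code is an assumption, not part of this patch):

/* Hedged caller sketch; actual call sites may differ. */
mtrr_aps_sync_begin();   /* mtrr_ap_init() now returns early on APs */
/* ... bring every AP online ... */
mtrr_aps_sync_end();     /* one set_mtrr(~0U, 0, 0, 0) rendezvous
                          * loads the MTRR state on all CPUs at once */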
void mtrr_bp_restore(void)
{
- if (!use_intel())
- return;
- mtrr_if->set_all();
+ mtrr_set_all();
}
static int __init cf_check mtrr_init_finialize(void)
{
if (!mtrr_if)
return 0;
- if (use_intel())
- mtrr_state_warn();
+ mtrr_state_warn();
return 0;
}
__initcall(mtrr_init_finialize);
#define MTRR_CHANGE_MASK_VARIABLE 0x02
#define MTRR_CHANGE_MASK_DEFTYPE 0x04
-
-struct mtrr_ops {
- u32 vendor;
- bool use_intel_if;
-// void (*init)(void);
- void (*set)(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
- void (*set_all)(void);
-
- void (*get)(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type);
- int (*get_free_region)(unsigned long base, unsigned long size,
- int replace_reg);
- int (*validate_add_page)(unsigned long base, unsigned long size,
- unsigned int type);
- int (*have_wrcomb)(void);
-};
-
-int cf_check generic_get_free_region(
+void mtrr_get(
+ unsigned int reg, unsigned long *base, unsigned long *size,
+ mtrr_type *type);
+void mtrr_set(
+ unsigned int reg, unsigned long base, unsigned long size, mtrr_type type);
+void mtrr_set_all(void);
+int mtrr_get_free_region(
unsigned long base, unsigned long size, int replace_reg);
-int cf_check generic_validate_add_page(
+int mtrr_validate_add_page(
unsigned long base, unsigned long size, unsigned int type);
-
-extern const struct mtrr_ops generic_mtrr_ops;
+bool mtrr_have_wrcomb(void);
void get_mtrr_state(void);
-extern void set_mtrr_ops(const struct mtrr_ops *);
-
extern u64 size_or_mask, size_and_mask;
-extern const struct mtrr_ops *mtrr_if;
-
-#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel() (mtrr_if && mtrr_if->use_intel_if)
extern unsigned int num_var_ranges;