#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/proto.h>
boot_cpu_data.x86_mask = eax & 0xf;
}
+#include <xen/interface/memory.h>
+/*
+ * Virtual base of the Xen machine-to-physical (M2P) translation table.
+ * Set during early boot: defaults to MACH2PHYS_VIRT_START and is
+ * relocated if the XENMEM_machphys_mapping hypercall succeeds.
+ * Exported so modules doing MFN->PFN lookups can use it.
+ */
+unsigned long *machine_to_phys_mapping;
+EXPORT_SYMBOL(machine_to_phys_mapping);
+/* log2 bound of the table: an MFN is in range iff (mfn >> order) == 0. */
+unsigned int machine_to_phys_order;
+EXPORT_SYMBOL(machine_to_phys_order);
+
void __init x86_64_start_kernel(char * real_mode_data)
{
+ struct xen_machphys_mapping mapping;
+ unsigned long machine_to_phys_nr_ents;
char *s;
int i;
xen_start_info->nr_pt_frames;
}
+
+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
+ }
+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
+ machine_to_phys_order++;
+
#if 0
for (i = 0; i < 256; i++)
set_intr_gate(i, early_idt_handler);
extern unsigned long *phys_to_machine_mapping;
+#undef machine_to_phys_mapping
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int machine_to_phys_order;
+
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (mfn >= MACH2PHYS_NR_ENTRIES)
+ if (unlikely((mfn >> machine_to_phys_order) != 0))
return max_mapnr;
/* The array access can fail (e.g., device space beyond end of RAM). */
" .long 1b,3b\n"
".previous"
: "=r" (pfn)
- : "m" (machine_to_phys_mapping[mfn]), "ir" (max_mapnr) );
+ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
return pfn;
}
* use of all of the static functions.
**/
+#include <xen/interface/memory.h>
+
static char * __init machine_specific_memory_setup(void)
{
unsigned long max_pfn = xen_start_info->nr_pages;
extern void failsafe_callback(void);
extern void nmi(void);
+/*
+ * 32-bit counterpart of the M2P table pointer: virtual base of the
+ * machine-to-physical table, initialised in machine_specific_arch_setup()
+ * (MACH2PHYS_VIRT_START by default, relocated via XENMEM_machphys_mapping).
+ */
+unsigned long *machine_to_phys_mapping;
+EXPORT_SYMBOL(machine_to_phys_mapping);
+/* log2 bound of the table: an MFN is in range iff (mfn >> order) == 0. */
+unsigned int machine_to_phys_order;
+EXPORT_SYMBOL(machine_to_phys_order);
+
static void __init machine_specific_arch_setup(void)
{
+ struct xen_machphys_mapping mapping;
+ unsigned long machine_to_phys_nr_ents;
struct xen_platform_parameters pp;
struct xennmi_callback cb;
if (HYPERVISOR_xen_version(XENVER_platform_parameters,
&pp) == 0)
set_fixaddr_top(pp.virt_start - PAGE_SIZE);
+
+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
+ }
+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
+ machine_to_phys_order++;
}
extern unsigned long *phys_to_machine_mapping;
+#undef machine_to_phys_mapping
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int machine_to_phys_order;
+
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (mfn >= MACH2PHYS_NR_ENTRIES)
+ if (unlikely((mfn >> machine_to_phys_order) != 0))
return end_pfn;
/* The array access can fail (e.g., device space beyond end of RAM). */
" .quad 1b,3b\n"
".previous"
: "=r" (pfn)
- : "m" (machine_to_phys_mapping[mfn]), "ir" (end_pfn) );
+ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
return pfn;
}
break;
}
+ /*
+  * XENMEM_machphys_mapping (hypervisor side): report to the guest
+  * where the M2P table is mapped in its virtual address space and
+  * the highest MFN it covers, via a xen_machphys_mapping struct.
+  */
+ case XENMEM_machphys_mapping:
+ {
+ struct xen_machphys_mapping mapping = {
+ .v_start = MACH2PHYS_VIRT_START,
+ .v_end = MACH2PHYS_VIRT_END,
+ .max_mfn = MACH2PHYS_NR_ENTRIES - 1
+ };
+
+ /* copy_to_guest returns the number of bytes NOT copied; 0 is success. */
+ if ( copy_to_guest(arg, &mapping, 1) )
+ return -EFAULT;
+
+ return 0;
+ }
+
default:
return subarch_memory_op(op, arg);
}
} xen_machphys_mfn_list_t;
DEFINE_GUEST_HANDLE(xen_machphys_mfn_list_t);
+/*
+ * Returns the location in virtual address space of the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table, or which do not
+ * map it by default into guest address space, do not implement this command.
+ * arg == addr of xen_machphys_mapping_t.
+ */
+#define XENMEM_machphys_mapping 12
+typedef struct xen_machphys_mapping {
+ unsigned long v_start, v_end; /* Start and end virtual addresses. */
+ unsigned long max_mfn; /* Maximum MFN that can be looked up. */
+} xen_machphys_mapping_t;
+DEFINE_GUEST_HANDLE(xen_machphys_mapping_t);
+
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.