#include <xen/trace.h>
#include <asm/setup.h>
#include <asm/fixmap.h>
+#include <asm/pci.h>
/* Mapping of the fixmap space needed early. */
l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
mem_sharing_get_nr_saved_mfns());
}
+/*
+ * Return a platform-specific list of machine addresses known to be bad
+ * (unusable RAM) on the running platform, or NULL when the quirk does
+ * not apply.  *array_size is set to the number of entries in all cases,
+ * but is only meaningful when a non-NULL pointer is returned.
+ */
+const unsigned long *__init get_platform_badpages(unsigned int *array_size)
+{
+    u32 igd_id;
+    /* Addresses reserved on Sandy Bridge IGD platforms (per IS_SNB_GFX). */
+    static unsigned long __initdata bad_pages[] = {
+        0x20050000,
+        0x20110000,
+        0x20130000,
+        0x20138000,
+        0x40004000,
+    };
+
+    *array_size = ARRAY_SIZE(bad_pages);
+    /*
+     * Read dword 0 (vendor/device ID) of what is presumably the integrated
+     * graphics device at 0000:00:02.0 — TODO confirm pci_conf_read32()
+     * argument order is (seg, bus, dev, func, reg) in this tree.
+     */
+    igd_id = pci_conf_read32(0, 0, 2, 0, 0);
+    if ( !IS_SNB_GFX(igd_id) )
+        return NULL;
+
+    return bad_pages;
+}
+
/*
* Local variables:
* mode: C
{
unsigned long bad_spfn, bad_epfn;
const char *p;
+#ifdef CONFIG_X86
+ const unsigned long *badpage = NULL;
+ unsigned int i, array_size;
+#endif
ps = round_pgup(ps);
pe = round_pgdown(pe);
bootmem_region_add(ps >> PAGE_SHIFT, pe >> PAGE_SHIFT);
+#ifdef CONFIG_X86
+    /*
+     * Apply platform-specific memory range workarounds here, i.e. for
+     * memory known to be corrupt or otherwise in need of being reserved
+     * on specific platforms.
+     * Retrieve those pages and remove them from the memory region list.
+     */
+ badpage = get_platform_badpages(&array_size);
+ if ( badpage )
+ {
+ for ( i = 0; i < array_size; i++ )
+ {
+ bootmem_region_zap(*badpage >> PAGE_SHIFT,
+ (*badpage >> PAGE_SHIFT) + 1);
+ badpage++;
+ }
+ }
+#endif
+
/* Check new pages against the bad-page list. */
p = opt_badpage;
while ( *p != '\0' )
#include <xen/keyhandler.h>
#include <asm/msi.h>
#include <asm/irq.h>
+#include <asm/pci.h>
#include <mach_apic.h>
#include "iommu.h"
#include "dmar.h"
#define IS_CTG(id) (id == 0x2a408086)
#define IS_ILK(id) (id == 0x00408086 || id == 0x00448086 || id== 0x00628086 || id == 0x006A8086)
#define IS_CPT(id) (id == 0x01008086 || id == 0x01048086)
-#define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 || id == 0x01268086 || id == 0x01028086 || id == 0x01128086 || id == 0x01228086 || id == 0x010A8086)
static u32 __read_mostly ioh_id;
static u32 __initdata igd_id;
void clear_superpage_mark(struct page_info *page);
+const unsigned long *get_platform_badpages(unsigned int *array_size);
/* Per page locks:
* page_lock() is used for two purposes: pte serialization, and memory sharing.
*
#ifndef __X86_PCI_H__
#define __X86_PCI_H__
+/*
+ * Intel (vendor 0x8086) Sandy Bridge integrated-graphics device/vendor IDs.
+ * Parenthesize the macro parameter and the whole expansion so the macro is
+ * safe with any argument expression (CERT PRE01-C / PRE02-C).
+ */
+#define IS_SNB_GFX(id) ((id) == 0x01068086 || (id) == 0x01168086 \
+                        || (id) == 0x01268086 || (id) == 0x01028086 \
+                        || (id) == 0x01128086 || (id) == 0x01228086 \
+                        || (id) == 0x010A8086)
+
struct arch_pci_dev {
vmask_t used_vectors;
};