ia64/xen-unstable

xen/drivers/passthrough/vtd/ia64/vtd.c @ 19810:aa472909b39c

vtd: IO NUMA support

This patch adds VT-d RHSA processing for IO NUMA support. The basic
idea is to parse the ACPI RHSA structure to obtain the VT-d HW to
proximity domain mapping. This mapping is then used when allocating
pages for VT-d HW data structures (see the sketch after the changeset
metadata below).

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Tue Jun 23 11:14:24 2009 +0100
parents  247023fb724c
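
As an illustration of the flow described above (not the literal contents of
this changeset), here is a minimal sketch of how a parsed RHSA entry could be
attached to its DRHD unit and then consulted when allocating VT-d structures.
The struct layout, the helpers drhd_by_reg_base(), apply_rhsa() and
alloc_vtd_page_on_node(), and the drhd->node field are assumptions for
illustration only; pxm_to_node(), alloc_domheap_pages(), MEMF_node() and
page_to_maddr() are standard Xen interfaces.

    /* Illustrative sketch only: identifiers below are hypothetical and do
     * not necessarily match what this changeset introduces. */
    struct acpi_rhsa_unit {            /* hypothetical parsed RHSA entry   */
        u64 reg_base_address;          /* register base of a DRHD unit     */
        u32 proximity_domain;          /* ACPI proximity domain            */
    };

    /* Remember which NUMA node each VT-d unit is close to. */
    static void apply_rhsa(struct acpi_rhsa_unit *rhsa)
    {
        struct acpi_drhd_unit *drhd = drhd_by_reg_base(rhsa->reg_base_address);

        if ( drhd )
            drhd->node = pxm_to_node(rhsa->proximity_domain); /* hypothetical field */
    }

    /* Allocate a page for VT-d data structures (root/context/page tables)
     * from the node recorded for that unit, falling back to any node. */
    static u64 alloc_vtd_page_on_node(int node)
    {
        struct page_info *pg = alloc_domheap_pages(NULL, 0, MEMF_node(node));

        if ( pg == NULL )
            pg = alloc_domheap_pages(NULL, 0, 0);

        return pg ? page_to_maddr(pg) : 0;
    }

Keeping the tables on the node local to the VT-d unit avoids cross-node
traffic when the hardware walks them.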
source:
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include <asm/xensystem.h>
#include <asm/sal.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"

int vector_irq[NR_VECTORS] __read_mostly = {
    [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN_IRQ
};

/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQS] __read_mostly;

void *map_vtd_domain_page(u64 maddr)
{
    /* Map the ia64 page containing maddr, then add back the offset of the
     * 4K VT-d page within it (VT-d structures are 4K-granular even when
     * PAGE_SIZE is larger). */
    return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
                    (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
}

void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}

void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}

unsigned int get_cache_line_size(void)
{
    return L1_CACHE_BYTES;
}

void cacheline_flush(char * addr)
{
    ia64_fc(addr);
    ia64_sync_i();
    ia64_srlz_i();
}

void flush_all_cache()
{
    /* SAL_CACHE_FLUSH with cache type 3 flushes both the instruction and
     * data caches. */
    ia64_sal_cache_flush(3);
}

void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    /* VT-d registers must be accessed uncached: return the address in the
     * ia64 uncacheable identity-mapped region. */
    return (void *)(maddr + __IA64_UNCACHED_OFFSET);
}

struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}

int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}

void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    /* dummy */
}

static int do_dom0_iommu_mapping(unsigned long start, unsigned long end,
                                 void *arg)
{
    unsigned long tmp, pfn, j, page_addr = start;
    struct domain *d = (struct domain *)arg;

    extern int xen_in_range(paddr_t start, paddr_t end);

    /* Set up 1:1 page table for dom0 for all RAM except the Xen bits. */
    while ( page_addr < end )
    {
        /* Skip ranges occupied by Xen itself, but still advance the
         * cursor so the loop terminates. */
        if ( xen_in_range(page_addr, page_addr + PAGE_SIZE) )
        {
            page_addr += PAGE_SIZE;
            continue;
        }

        /* One ia64 page covers several 4K IOMMU frames; map each 1:1. */
        pfn = page_addr >> PAGE_SHIFT;
        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
            iommu_map_page(d, (pfn * tmp + j), (pfn * tmp + j));

        page_addr += PAGE_SIZE;
    }

    return 0;
}

void iommu_set_dom0_mapping(struct domain *d)
{
    if ( dom0 )
        BUG_ON(d != dom0);
    efi_memmap_walk(do_dom0_iommu_mapping, d);
}