ia64/xen-unstable

xen/drivers/passthrough/vtd/x86/vtd.c @ 19810:aa472909b39c

vtd: IO NUMA support

This patch adds VT-d RHSA processing for IO NUMA support. The basic
idea is to parse the ACPI RHSA structure to obtain the VT-d HW to
proximity domain mapping. This mapping is then used when allocating
pages for VT-d HW data structures.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 23 11:14:24 2009 +0100 (2009-06-23)
parents 931dbe86e5f3
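
To illustrate the idea in the commit message, here is a minimal, hypothetical
sketch of how a parsed RHSA entry could be turned into a NUMA node and used for
node-local allocation of VT-d data structures. The structure layout and the
helpers find_rhsa_for() and alloc_pgtable_node() are invented for illustration,
and the sketch assumes Xen's pxm_to_node(), MEMF_node(), alloc_domheap_page()
and page_to_maddr() interfaces are available in this tree; the real RHSA
parsing added by this changeset lives in the common VT-d code, not in the x86
helper file shown below.

struct acpi_rhsa_unit {                 /* hypothetical parsed RHSA entry  */
    u64 address;                        /* register base of the DRHD unit  */
    u32 proximity_domain;               /* ACPI proximity domain (PXM)     */
};

/* Map a DRHD register base to a NUMA node via its RHSA entry, if any. */
static int drhd_to_node(u64 drhd_address)
{
    struct acpi_rhsa_unit *rhsa = find_rhsa_for(drhd_address); /* hypothetical */

    /* pxm_to_node() converts an ACPI proximity domain to a Xen node id;
     * -1 is used here as the "no node known" sentinel for this sketch. */
    return rhsa ? pxm_to_node(rhsa->proximity_domain) : -1;
}

/* Allocate a page for VT-d data structures near the IOMMU that uses it. */
static u64 alloc_pgtable_node(int node)
{
    /* MEMF_node() steers the domheap allocation to the given node;
     * fall back to an unconstrained allocation when no node is known. */
    struct page_info *pg = alloc_domheap_page(
        NULL, (node < 0) ? 0 : MEMF_node(node));

    return pg ? page_to_maddr(pg) : 0;
}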
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"

/*
 * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
 * 1:1 iommu mappings except xen and unusable regions.
 */
static int iommu_inclusive_mapping;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
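/*
 * Usage note: boolean_param() registers a Xen boot command line option, so
 * the inclusive mapping above is typically enabled by appending
 * "iommu_inclusive_mapping" (or "iommu_inclusive_mapping=1") to the
 * hypervisor command line; it defaults to off.
 */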
void *map_vtd_domain_page(u64 maddr)
{
    return map_domain_page(maddr >> PAGE_SHIFT_4K);
}

void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}

void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
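/*
 * CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, in units of
 * 8-byte quadwords, so get_cache_line_size() below multiplies that field
 * by 8 to return the cache line size in bytes.
 */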
unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}

void cacheline_flush(char * addr)
{
    clflush(addr);
}

void flush_all_cache()
{
    wbinvd();
}

void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, maddr);
    return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}

struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}

int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}

void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    struct hvm_irq_dpci *dpci = NULL;
    struct dev_intx_gsi_link *digl, *tmp;
    int i;

    ASSERT(isairq < NR_ISAIRQS);
    if ( !iommu_enabled)
        return;

    spin_lock(&d->event_lock);

    dpci = domain_get_irq_dpci(d);

    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    {
        spin_unlock(&d->event_lock);
        return;
    }
    /* Multiple mirq may be mapped to one isa irq */
    for ( i = find_first_bit(dpci->mapping, d->nr_pirqs);
          i < d->nr_pirqs;
          i = find_next_bit(dpci->mapping, d->nr_pirqs, i + 1) )
    {
        list_for_each_entry_safe ( digl, tmp,
            &dpci->mirq[i].digl_list, list )
        {
            if ( hvm_irq->pci_link.route[digl->link] == isairq )
            {
                hvm_pci_intx_deassert(d, digl->device, digl->intx);
                if ( --dpci->mirq[i].pending == 0 )
                {
                    stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
                    pirq_guest_eoi(d, i);
                }
            }
        }
    }
    spin_unlock(&d->event_lock);
}

void iommu_set_dom0_mapping(struct domain *d)
{
    u64 i, j, tmp, max_pfn;
    extern int xen_in_range(paddr_t start, paddr_t end);

    BUG_ON(d->domain_id != 0);

    max_pfn = max_t(u64, max_page, 0x100000000ull >> PAGE_SHIFT);

    for ( i = 0; i < max_pfn; i++ )
    {
        /*
         * Set up 1:1 mapping for dom0. Default to use only conventional RAM
         * areas and let RMRRs include needed reserved regions. When set, the
         * inclusive mapping maps in everything below 4GB except unusable
         * ranges.
         */
        if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
             (!iommu_inclusive_mapping ||
              page_is_ram_type(i, RAM_TYPE_UNUSABLE)) )
            continue;

        /* Exclude Xen bits */
        if ( xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
            continue;

        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
    }
}
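
As a usage illustration, the following sketch shows how the mapping helpers
defined above are typically consumed when the common VT-d code walks an I/O
page table. The wrapper name read_vtd_pte_maddr() is invented for this
example, and struct dma_pte and PAGE_MASK_4K are assumed to come from the
shared VT-d headers (../iommu.h); details may differ in this tree.

/* Read the machine address stored in entry 'index' of a VT-d table page. */
static u64 read_vtd_pte_maddr(u64 table_maddr, unsigned int index)
{
    struct dma_pte *table, pte;

    /* Temporarily map the 4K table page identified by its machine address. */
    table = (struct dma_pte *)map_vtd_domain_page(table_maddr);
    pte = table[index];
    unmap_vtd_domain_page(table);

    /* Mask off the low flag bits to recover the next-level table address. */
    return pte.val & PAGE_MASK_4K;
}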