ia64/xen-unstable

xen/drivers/passthrough/vtd/x86/vtd.c @ 19227:08da408254c6

vtd, x86: Fix dom0 boot crash

Signed-off-by: Allen Kay <allen.m.kay@intel.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Wed Feb 18 08:56:31 2009 +0000
parents  ac3ecce4502d
children 507b264f0a21
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
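
/* Map the VT-d page-table page at machine address maddr into Xen's
 * virtual address space. */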
void *map_vtd_domain_page(u64 maddr)
{
    return map_domain_page(maddr >> PAGE_SHIFT_4K);
}
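
/* Release a mapping obtained from map_vtd_domain_page(). */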
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}

/* Allocate page table, return its machine address */
u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
{
    struct page_info *pg;
    u64 *vaddr;
    unsigned long mfn;

    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
                             d ? MEMF_node(domain_to_node(d)) : 0);
    if ( !pg )
        return 0;
    mfn = page_to_mfn(pg);
    vaddr = map_domain_page(mfn);
    memset(vaddr, 0, PAGE_SIZE * npages);

    iommu_flush_cache_page(vaddr, npages);
    unmap_domain_page(vaddr);

    return (u64)mfn << PAGE_SHIFT_4K;
}
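
/* Free the page-table page at machine address maddr; a zero maddr is
 * silently ignored. */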
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
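
/* Return the CLFLUSH line size in bytes: CPUID leaf 1 reports it in
 * EBX bits 15:8 as a count of 8-byte chunks. */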
unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}
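
/* Flush the cache line containing addr. */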
void cacheline_flush(char * addr)
{
    clflush(addr);
}
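
/* Write back and invalidate the entire CPU cache. */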
void flush_all_cache()
{
    wbinvd();
}
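
/* Map an IOMMU's register block at machine address maddr through its own
 * non-cacheable fixmap slot and return the resulting virtual address. */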
void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, maddr);
    return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}
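
/* Fetch the domain's passthrough interrupt (dpci) state; returns NULL for
 * a NULL domain. */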
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}
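
/* Install dpci state for a domain; returns 1 on success, 0 if either
 * argument is NULL. */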
int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}
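
/* On guest EOI of an ISA IRQ: deassert every guest PCI INTx line routed to
 * that ISA IRQ, and once a machine IRQ has no instances left pending, stop
 * its timeout timer and forward the EOI to the physical IRQ. */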
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    struct hvm_irq_dpci *dpci = NULL;
    struct dev_intx_gsi_link *digl, *tmp;
    int i;

    ASSERT(isairq < NR_ISAIRQS);
    if ( !vtd_enabled )
        return;

    spin_lock(&d->event_lock);

    dpci = domain_get_irq_dpci(d);

    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    {
        spin_unlock(&d->event_lock);
        return;
    }
    /* Multiple mirq may be mapped to one isa irq */
    for ( i = find_first_bit(dpci->mapping, NR_IRQS);
          i < NR_IRQS;
          i = find_next_bit(dpci->mapping, NR_IRQS, i + 1) )
    {
        list_for_each_entry_safe ( digl, tmp,
            &dpci->mirq[i].digl_list, list )
        {
            if ( hvm_irq->pci_link.route[digl->link] == isairq )
            {
                hvm_pci_intx_deassert(d, digl->device, digl->intx);
                if ( --dpci->mirq[i].pending == 0 )
                {
                    stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
                    pirq_guest_eoi(d, i);
                }
            }
        }
    }
    spin_unlock(&d->event_lock);
}
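
/* Set up the dom0 1:1 VT-d mappings: every page of conventional RAM is
 * mapped except the ranges occupied by Xen itself. */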
void iommu_set_dom0_mapping(struct domain *d)
{
    u64 i, j, tmp;
    extern int xen_in_range(paddr_t start, paddr_t end);

    BUG_ON(d->domain_id != 0);

    for ( i = 0; i < max_page; i++ )
    {
        /* Set up 1:1 mapping for dom0 for all RAM except Xen bits. */
        if ( !page_is_conventional_ram(i) ||
             xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
            continue;

        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
    }
}