ia64/xen-unstable

xen/drivers/passthrough/vtd/x86/vtd.c @ 18803:2604400f75e3

vtd: fix memory allocation from NUMA node for VT-d.

Signed-off-by: Yuji Shimada <shimada-yxb@necst.nec.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 10:52:42 2008 +0000 (2008-11-18)
parents 2188ed106885
children 1eb6afcad849
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
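
/*
 * Map/unmap a VT-d page-table page, identified by its machine address,
 * into Xen's virtual address space.
 */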
void *map_vtd_domain_page(u64 maddr)
{
    return map_domain_page(maddr >> PAGE_SHIFT_4K);
}

void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
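
/*
 * The page is taken from the domain heap with MEMF_node(domain_to_node(d)),
 * so page-table memory comes from the owning domain's NUMA node whenever
 * the domain is known (see the changeset description above).
 */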
/* Allocate page table, return its machine address */
u64 alloc_pgtable_maddr(struct domain *d)
{
    struct page_info *pg;
    u64 *vaddr;
    unsigned long mfn;

    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
    if ( !pg )
        return 0;
    mfn = page_to_mfn(pg);
    vaddr = map_domain_page(mfn);
    memset(vaddr, 0, PAGE_SIZE);

    iommu_flush_cache_page(vaddr);
    unmap_domain_page(vaddr);

    return (u64)mfn << PAGE_SHIFT_4K;
}

void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
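
/*
 * CPUID leaf 1, EBX bits 15:8 report the CLFLUSH line size in 8-byte
 * units; multiplying by 8 gives the cache line size in bytes.
 */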
unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}

void cacheline_flush(char * addr)
{
    clflush(addr);
}

void flush_all_cache()
{
    wbinvd();
}
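
/*
 * Map an IOMMU's register page at a dedicated fixmap slot
 * (FIX_IOMMU_REGS_BASE_0 + nr_iommus) with caching disabled and return
 * the corresponding virtual address.
 */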
void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, maddr);
    return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}

struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}

int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}
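
/*
 * Handle a guest EOI of an ISA IRQ: for every machine IRQ mapped to this
 * domain, deassert any guest PCI INTx line routed to that ISA IRQ, and
 * once no assertion remains pending, stop the EOI timeout timer and EOI
 * the physical IRQ.
 */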
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    struct hvm_irq_dpci *dpci = NULL;
    struct dev_intx_gsi_link *digl, *tmp;
    int i;

    ASSERT(isairq < NR_ISAIRQS);
    if ( !vtd_enabled )
        return;

    spin_lock(&d->event_lock);

    dpci = domain_get_irq_dpci(d);

    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    {
        spin_unlock(&d->event_lock);
        return;
    }
    /* Multiple mirq may be mapped to one isa irq */
    for ( i = find_first_bit(dpci->mapping, NR_IRQS);
          i < NR_IRQS;
          i = find_next_bit(dpci->mapping, NR_IRQS, i + 1) )
    {
        list_for_each_entry_safe ( digl, tmp,
                                   &dpci->mirq[i].digl_list, list )
        {
            if ( hvm_irq->pci_link.route[digl->link] == isairq )
            {
                hvm_pci_intx_deassert(d, digl->device, digl->intx);
                if ( --dpci->mirq[i].pending == 0 )
                {
                    stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
                    pirq_guest_eoi(d, i);
                }
            }
        }
    }
    spin_unlock(&d->event_lock);
}