ia64/xen-unstable

view xen/drivers/passthrough/vtd/ia64/vtd.c @ 18803:2604400f75e3

vtd: fix memory allocation from NUMA node for VT-d.

Signed-off-by: Yuji Shimada <shimada-yxb@necst.nec.co.jp>
author   Keir Fraser <keir.fraser@citrix.com>
date     Tue Nov 18 10:52:42 2008 +0000 (2008-11-18)
parents  6eb23f7ece78
children c15577ad46f2
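The change targets VT-d page-table allocations at the NUMA node of the owning
domain. A minimal sketch of the idea (the actual call appears in
alloc_pgtable_maddr() in the listing below; "d" is the owning domain, and a
NULL domain falls back to no node preference):

    /* node-aware allocation: MEMF_node() encodes the target NUMA node */
    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);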
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include <asm/xensystem.h>
#include <asm/sal.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1 };
/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
/*
 * Map one 4K VT-d page. On ia64 the hypervisor page size (PAGE_SIZE) can be
 * larger than the 4K VT-d page size; the mask below keeps the offset of the
 * 4K sub-page within the mapped hypervisor page.
 */
void *map_vtd_domain_page(u64 maddr)
{
    return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
                    (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
}
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
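/*
 * Illustrative usage sketch (hypothetical caller, kept under "#if 0" so the
 * file still compiles): map a page-table page by machine address, read an
 * entry, and unmap it again. "pgtable_maddr" is a hypothetical value.
 */
#if 0
    u64 *entries = map_vtd_domain_page(pgtable_maddr);
    u64 first = entries[0];          /* read the first 64-bit entry */
    unmap_vtd_domain_page(entries);
#endif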
/* Allocate a page table page; return its machine address, or 0 on failure. */
u64 alloc_pgtable_maddr(struct domain *d)
{
    struct page_info *pg;
    u64 *vaddr;

    /* Allocate from the NUMA node of the owning domain, if there is one. */
    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
    if ( !pg )
        return 0;
    vaddr = map_domain_page(page_to_mfn(pg));
    if ( !vaddr )
        return 0;
    memset(vaddr, 0, PAGE_SIZE);

    /* VT-d hardware may not snoop CPU caches; write the zeroed page back. */
    iommu_flush_cache_page(vaddr);
    unmap_domain_page(vaddr);

    return page_to_maddr(pg);
}
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
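/*
 * Illustrative pairing of the two helpers above (hypothetical caller, kept
 * under "#if 0"): allocate a node-local page-table page for domain "d", use
 * it, then free it by machine address. "d" is assumed to be a valid domain.
 */
#if 0
    u64 maddr = alloc_pgtable_maddr(d);
    if ( maddr != 0 )
    {
        /* ... install maddr into a context or page-table entry ... */
        free_pgtable_maddr(maddr);
    }
#endif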
unsigned int get_cache_line_size(void)
{
    return L1_CACHE_BYTES;
}
/* Flush one cache line and make the flush visible to the local CPU. */
void cacheline_flush(char * addr)
{
    ia64_fc(addr);      /* flush-cache: write back and invalidate the line */
    ia64_sync_i();      /* ensure coherency with the instruction stream */
    ia64_srlz_i();      /* serialize before subsequent instruction fetches */
}
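/*
 * Sketch of how the common VT-d code is expected to drive this primitive
 * (illustrative, kept under "#if 0"): flush a buffer one cache line at a
 * time, using get_cache_line_size() above as the stride. "addr" and "size"
 * are hypothetical.
 */
#if 0
    unsigned int i, clflush_size = get_cache_line_size();
    for ( i = 0; i < size; i += clflush_size )
        cacheline_flush((char *)addr + i);
#endif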
/* Flush the whole cache hierarchy via SAL (3 = instruction + data caches). */
void flush_all_cache(void)
{
    ia64_sal_cache_flush(3);
}
/*
 * Return an uncacheable virtual address for the IOMMU register page.
 * (nr_iommus is unused on ia64.)
 */
void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    return (void *)(maddr + __IA64_UNCACHED_OFFSET);
}
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}
int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    /* dummy */
}