ia64/xen-unstable

xen/drivers/passthrough/vtd/ia64/vtd.c @ 19187:1eb6afcad849

vtd: adding support for multiple queued invalidation pages

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Mon Feb 09 14:23:51 2009 +0000
Parents:  c15577ad46f2
Children: 09ea7eea8122
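
In this file, alloc_pgtable_maddr() takes an npages count, which is what
lets the queued invalidation (QI) code obtain a multi-page invalidation
queue in a single allocation. A minimal sketch of the intended kind of
call site, assuming a hypothetical NUM_QINVAL_PAGES constant (the real QI
setup lives in the common VT-d code, not in this file):

    u64 qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
    if ( qinval_maddr == 0 )
        return -ENOMEM;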
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include <asm/xensystem.h>
#include <asm/sal.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"

int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1 };
/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQS] __read_mostly;
/*
 * VT-d structures are 4K-resident, while the ia64 page size is larger;
 * map the containing domain page and OR in the offset of the 4K sub-page.
 */
void *map_vtd_domain_page(u64 maddr)
{
    return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
                    (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
}

void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
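
/*
 * Usage sketch (hypothetical, not part of this changeset): callers pair
 * these like any other short-lived mapping.  "struct dma_pte" here stands
 * in for whatever 4K-resident VT-d structure is being accessed.
 *
 *     struct dma_pte *pte = map_vtd_domain_page(pt_maddr);
 *     ... read or update entries in the 4K table ...
 *     unmap_vtd_domain_page(pte);
 */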
/* Allocate zeroed page table pages, return the machine address. */
u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
{
    struct page_info *pg;
    u64 *vaddr;

    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
                             d ? MEMF_node(domain_to_node(d)) : 0);
    if ( !pg )
        return 0;
    vaddr = map_domain_page(page_to_mfn(pg));
    if ( !vaddr )
    {
        free_domheap_pages(pg, get_order_from_pages(npages));
        return 0;
    }
    memset(vaddr, 0, PAGE_SIZE * npages);

    iommu_flush_cache_page(vaddr);
    unmap_domain_page(vaddr);

    return page_to_maddr(pg);
}
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
unsigned int get_cache_line_size(void)
{
    return L1_CACHE_BYTES;
}

void cacheline_flush(char *addr)
{
    ia64_fc(addr);    /* fc: flush the cache line containing addr */
    ia64_sync_i();    /* sync.i: make the flush visible to the I-stream */
    ia64_srlz_i();    /* srlz.i: serialize the instruction stream */
}
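
/*
 * Usage sketch (hypothetical helper, not in this file): flushing every
 * cache line of a just-written descriptor so that a non-cache-coherent
 * IOMMU observes the update.
 *
 *     static void flush_region(void *addr, unsigned int size)
 *     {
 *         unsigned int line = get_cache_line_size(), i;
 *
 *         for ( i = 0; i < size; i += line )
 *             cacheline_flush((char *)addr + i);
 *     }
 */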
void flush_all_cache(void)
{
    /* SAL_CACHE_FLUSH, cache_type 3: flush both instruction and data
     * caches. */
    ia64_sal_cache_flush(3);
}
void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    /* ia64 has an identity-mapped uncached region, so no per-IOMMU
     * mapping slot is needed; nr_iommus is unused here. */
    return (void *)(maddr + __IA64_UNCACHED_OFFSET);
}
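
/*
 * Usage sketch (hypothetical): IOMMU register blocks must be accessed
 * uncached, so the DRHD setup code would map them with this helper.
 * "iommu", "drhd" and "nr_iommus" are illustrative names here.
 *
 *     iommu->reg = map_to_nocache_virt(nr_iommus, drhd->address);
 *     nr_iommus++;
 */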
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}

int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}
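
/*
 * Usage sketch (hypothetical caller): the passthrough code typically
 * follows a get-or-allocate pattern; allocation failure handling is
 * elided here.
 *
 *     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
 *
 *     if ( dpci == NULL )
 *     {
 *         dpci = xmalloc(struct hvm_irq_dpci);
 *         memset(dpci, 0, sizeof(*dpci));
 *         domain_set_irq_dpci(d, dpci);
 *     }
 */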
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    /* Dummy handler: ISA IRQ EOI needs no action on ia64. */
}