ia64/xen-unstable

xen/drivers/passthrough/vtd/utils.c @ 19733:a69daf23602a

VT-d: define a macro for waiting for hardware completion

When setting certain VT-d registers, software must wait for the
hardware to complete the operation. There was a lot of duplicated code
doing that. This patch defines a macro for it, which makes the code
much cleaner.
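
For illustration, here is a minimal sketch of what such a wait macro can
look like. This is a hedged reconstruction, not necessarily the exact
definition the patch adds: DMAR_OPERATION_TIMEOUT is assumed to be a VT-d
operation timeout constant, and NOW()/cpu_relax() are the usual Xen
time/spin helpers.

    #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)            \
    do {                                                           \
        s_time_t start = NOW();                                    \
        while ( 1 )                                                \
        {                                                          \
            /* Re-read the register until the condition holds. */ \
            sts = op(iommu->reg, offset);                          \
            if ( cond )                                            \
                break;                                             \
            /* A non-responding IOMMU is fatal: give up. */        \
            if ( NOW() > start + DMAR_OPERATION_TIMEOUT )          \
                panic("IOMMU operation timed out\n");              \
            cpu_relax();                                           \
        }                                                          \
    } while ( 0 )

disable_pmr() in the file below shows a typical call site: it spins on
DMAR_PMEN_REG with dmar_readl until the protected-region status bit
(DMA_PMEN_PRS) clears.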

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 09:27:18 2009 +0100 (2009-06-05)
parents 931dbe86e5f3
children cc07094a02e4
line source
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 */

#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

/* PCI class code 0x0c03: Serial Bus Controller, USB. */
int is_usb_device(u8 bus, u8 devfn)
{
    u16 class = pci_conf_read16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                                PCI_CLASS_DEVICE);
    return (class == 0xc03);
}

/* Disable vt-d protected memory registers. */
void disable_pmr(struct iommu *iommu)
{
    u32 val;

    val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
    if ( !(val & DMA_PMEN_PRS) )
        return;

    dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);

    IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl,
                  !(val & DMA_PMEN_PRS), val);

    dprintk(XENLOG_INFO VTDPREFIX,
            "Disabled protected memory registers\n");
}

void print_iommu_regs(struct acpi_drhd_unit *drhd)
{
    struct iommu *iommu = drhd->iommu;

    printk("---- print_iommu_regs ----\n");
    printk("print_iommu_regs: drhd->address = %"PRIx64"\n", drhd->address);
    printk("print_iommu_regs: DMAR_VER_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_VER_REG));
    printk("print_iommu_regs: DMAR_CAP_REG = %"PRIx64"\n",
           dmar_readq(iommu->reg, DMAR_CAP_REG));
    printk("print_iommu_regs: n_fault_reg = %"PRIx64"\n",
           cap_num_fault_regs(dmar_readq(iommu->reg, DMAR_CAP_REG)));
    printk("print_iommu_regs: fault_recording_offset_l = %"PRIx64"\n",
           cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)));
    printk("print_iommu_regs: fault_recording_offset_h = %"PRIx64"\n",
           cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 8);
    printk("print_iommu_regs: fault_recording_reg_l = %"PRIx64"\n",
           dmar_readq(iommu->reg,
               cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG))));
    printk("print_iommu_regs: fault_recording_reg_h = %"PRIx64"\n",
           dmar_readq(iommu->reg,
               cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 8));
    printk("print_iommu_regs: DMAR_ECAP_REG = %"PRIx64"\n",
           dmar_readq(iommu->reg, DMAR_ECAP_REG));
    printk("print_iommu_regs: DMAR_GCMD_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_GCMD_REG));
    printk("print_iommu_regs: DMAR_GSTS_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_GSTS_REG));
    printk("print_iommu_regs: DMAR_RTADDR_REG = %"PRIx64"\n",
           dmar_readq(iommu->reg, DMAR_RTADDR_REG));
    printk("print_iommu_regs: DMAR_CCMD_REG = %"PRIx64"\n",
           dmar_readq(iommu->reg, DMAR_CCMD_REG));
    printk("print_iommu_regs: DMAR_FSTS_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_FSTS_REG));
    printk("print_iommu_regs: DMAR_FECTL_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_FECTL_REG));
    printk("print_iommu_regs: DMAR_FEDATA_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_FEDATA_REG));
    printk("print_iommu_regs: DMAR_FEADDR_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_FEADDR_REG));
    printk("print_iommu_regs: DMAR_FEUADDR_REG = %x\n",
           dmar_readl(iommu->reg, DMAR_FEUADDR_REG));
}

/* Index of gmfn within page-table level `level' (LEVEL_STRIDE bits apiece). */
u32 get_level_index(unsigned long gmfn, int level)
{
    while ( --level )
        gmfn = gmfn >> LEVEL_STRIDE;

    return gmfn & LEVEL_MASK;
}

void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
{
    struct context_entry *ctxt_entry;
    struct root_entry *root_entry;
    struct dma_pte pte;
    u64 *l;
    u32 l_index, level;

    printk("print_vtd_entries: iommu = %p bdf = %x:%x:%x gmfn = %"PRIx64"\n",
           iommu, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn);

    if ( iommu->root_maddr == 0 )
    {
        printk("    iommu->root_maddr = 0\n");
        return;
    }

    root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr);

    printk("    root_entry = %p\n", root_entry);
    printk("    root_entry[%x] = %"PRIx64"\n", bus, root_entry[bus].val);
    if ( !root_present(root_entry[bus]) )
    {
        unmap_vtd_domain_page(root_entry);
        printk("    root_entry[%x] not present\n", bus);
        return;
    }

    ctxt_entry =
        (struct context_entry *)map_vtd_domain_page(root_entry[bus].val);
    if ( ctxt_entry == NULL )
    {
        unmap_vtd_domain_page(root_entry);
        printk("    ctxt_entry == NULL\n");
        return;
    }

    printk("    context = %p\n", ctxt_entry);
    printk("    context[%x] = %"PRIx64"_%"PRIx64"\n",
           devfn, ctxt_entry[devfn].hi, ctxt_entry[devfn].lo);
    if ( !context_present(ctxt_entry[devfn]) )
    {
        unmap_vtd_domain_page(ctxt_entry);
        unmap_vtd_domain_page(root_entry);
        printk("    ctxt_entry[%x] not present\n", devfn);
        return;
    }

    level = agaw_to_level(context_address_width(ctxt_entry[devfn]));
    if ( level != VTD_PAGE_TABLE_LEVEL_3 &&
         level != VTD_PAGE_TABLE_LEVEL_4 )
    {
        unmap_vtd_domain_page(ctxt_entry);
        unmap_vtd_domain_page(root_entry);
        printk("Unsupported VTD page table level (%d)!\n", level);
        return; /* ctxt_entry was just unmapped; must not dereference it below. */
    }

    l = maddr_to_virt(ctxt_entry[devfn].lo);
    do
    {
        l = (u64*)(((unsigned long)l >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K);
        printk("    l%d = %p\n", level, l);
        if ( l == NULL )
        {
            unmap_vtd_domain_page(ctxt_entry);
            unmap_vtd_domain_page(root_entry);
            printk("    l%d == NULL\n", level);
            break;
        }
        l_index = get_level_index(gmfn, level);
        printk("    l%d_index = %x\n", level, l_index);
        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, l[l_index]);

        pte.val = l[l_index];
        if ( !dma_pte_present(pte) )
        {
            unmap_vtd_domain_page(ctxt_entry);
            unmap_vtd_domain_page(root_entry);
            printk("    l%d[%x] not present\n", level, l_index);
            break;
        }

        l = maddr_to_virt(l[l_index]);
    } while ( --level );
}

void dump_iommu_info(unsigned char key)
{
#if defined(__i386__) || defined(__x86_64__)
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i;

    for_each_drhd_unit ( drhd )
    {
        u32 status = 0;

        iommu = drhd->iommu;
        printk("\niommu %x: nr_pt_levels = %x.\n", iommu->index,
               iommu->nr_pt_levels);

        if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) )
            status = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        printk("  Queued Invalidation: %ssupported%s.\n",
               ecap_queued_inval(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_QIES) ? " and enabled" : "" );

        printk("  Interrupt Remapping: %ssupported%s.\n",
               ecap_intr_remap(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_IRES) ? " and enabled" : "" );

        if ( status & DMA_GSTS_IRES )
        {
            /* Dump interrupt remapping table. */
            u64 iremap_maddr = dmar_readq(iommu->reg, DMAR_IRTA_REG);
            int nr_entry = 1 << ((iremap_maddr & 0xF) + 1);
            struct iremap_entry *iremap_entries =
                (struct iremap_entry *)map_vtd_domain_page(iremap_maddr);

            printk("  Interrupt remapping table (nr_entry=0x%x. "
                   "Only dump P=1 entries here):\n", nr_entry);
            printk("       SVT  SQ   SID      DST  V  AVL DLM TM RH DM "
                   "FPD P\n");
            for ( i = 0; i < nr_entry; i++ )
            {
                struct iremap_entry *p = iremap_entries + i;

                if ( !p->lo.p )
                    continue;
                printk("  %04x:  %x   %x  %04x %08x %02x  %x   %x  %x  %x  %x"
                       "   %x %x\n", i,
                       (u32)p->hi.svt, (u32)p->hi.sq, (u32)p->hi.sid,
                       (u32)p->lo.dst, (u32)p->lo.vector, (u32)p->lo.avail,
                       (u32)p->lo.dlm, (u32)p->lo.tm, (u32)p->lo.rh,
                       (u32)p->lo.dm, (u32)p->lo.fpd, (u32)p->lo.p);
            }

            unmap_vtd_domain_page(iremap_entries);
        }
    }

    /* Dump the I/O xAPIC redirection table(s). */
    if ( iommu_enabled )
    {
        int apic, reg;
        union IO_APIC_reg_01 reg_01;
        struct IO_APIC_route_entry rte = { 0 };
        struct IO_APIC_route_remap_entry *remap;
        struct ir_ctrl *ir_ctrl;

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            /* Check the iommu pointer before dereferencing it via
             * iommu_ir_ctrl(). */
            iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
            if ( !iommu )
                continue;
            ir_ctrl = iommu_ir_ctrl(iommu);
            if ( !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
                 ir_ctrl->iremap_index == -1 )
                continue;

            printk("\nRedirection table of IOAPIC %x:\n", apic);

            reg = 1; /* IO xAPIC Version Register. */
            *IO_APIC_BASE(apic) = reg;
            reg_01.raw = *(IO_APIC_BASE(apic)+4);

            printk("  #entry IDX FMT MASK TRIG IRR POL STAT DELI VECTOR\n");
            for ( i = 0; i <= reg_01.bits.entries; i++ )
            {
                reg = 0x10 + i*2;
                *IO_APIC_BASE(apic) = reg;
                *(((u32 *)&rte) + 0) = *(IO_APIC_BASE(apic)+4);

                *IO_APIC_BASE(apic) = reg + 1;
                *(((u32 *)&rte) + 1) = *(IO_APIC_BASE(apic)+4);

                remap = (struct IO_APIC_route_remap_entry *) &rte;
                if ( !remap->format )
                    continue;

                printk("  %02x:  %04x  %x   %x    %x    %x   %x    %x"
                       "    %x    %02x\n", i,
                       (u32)remap->index_0_14 | ((u32)remap->index_15 << 15),
                       (u32)remap->format, (u32)remap->mask, (u32)remap->trigger,
                       (u32)remap->irr, (u32)remap->polarity,
                       (u32)remap->delivery_status, (u32)remap->delivery_mode,
                       (u32)remap->vector);
            }
        }
    }
#else
    printk("%s: not implemented on IA64 for now.\n", __func__);
    /* ia64: TODO */
#endif
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */