ia64/xen-unstable

xen/drivers/passthrough/iommu.c @ 19835:edfdeb150f27

Fix build system to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123, and
Xen's build system could not detect whether udev is in place and has
the required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 78962f85c562
children
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>

static void parse_iommu_param(char *s);
static int iommu_populate_page_table(struct domain *d);
int intel_vtd_setup(void);
int amd_iov_detect(void);

/*
 * The 'iommu' parameter enables the IOMMU. An optional comma-separated
 * value may contain:
 *
 *   off|no|false|disable   Disable IOMMU (default)
 *   pv                     Enable IOMMU for PV domains
 *   no-pv                  Disable IOMMU for PV domains (default)
 *   force|required         Don't boot unless the IOMMU is enabled
 *   passthrough            Enable VT-d DMA passthrough (no DMA
 *                          translation for Dom0)
 *   no-snoop               Disable VT-d Snoop Control
 *   no-qinval              Disable VT-d Queued Invalidation
 *   no-intremap            Disable VT-d Interrupt Remapping
 */
custom_param("iommu", parse_iommu_param);

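/*
 * Illustrative example (not part of the original source): booting Xen with
 * "iommu=force,passthrough,no-intremap" enables the IOMMU, panics rather
 * than booting if hardware setup fails (force), leaves Dom0 DMA
 * untranslated (passthrough), and disables VT-d interrupt remapping
 * (no-intremap). parse_iommu_param() below walks the comma-separated list
 * and sets the corresponding flags.
 */
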
int iommu_enabled = 0;
int iommu_pv_enabled = 0;
int force_iommu = 0;
int iommu_passthrough = 0;
int iommu_snoop = 0;
int iommu_qinval = 0;
int iommu_intremap = 0;

static void __init parse_iommu_param(char *s)
{
    char *ss;
    iommu_enabled = 1;
    iommu_snoop = 1;
    iommu_qinval = 1;
    iommu_intremap = 1;

    do {
        ss = strchr(s, ',');
        if ( ss )
            *ss = '\0';

        if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
             !strcmp(s, "0") || !strcmp(s, "disable") )
            iommu_enabled = 0;
        else if ( !strcmp(s, "pv") )
            iommu_pv_enabled = 1;
        else if ( !strcmp(s, "no-pv") )
            iommu_pv_enabled = 0;
        else if ( !strcmp(s, "force") || !strcmp(s, "required") )
            force_iommu = 1;
        else if ( !strcmp(s, "passthrough") )
            iommu_passthrough = 1;
        else if ( !strcmp(s, "no-snoop") )
            iommu_snoop = 0;
        else if ( !strcmp(s, "no-qinval") )
            iommu_qinval = 0;
        else if ( !strcmp(s, "no-intremap") )
            iommu_intremap = 0;

        s = ss + 1;
    } while ( ss );
}

int iommu_domain_init(struct domain *domain)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    spin_lock_init(&hd->mapping_lock);
    INIT_LIST_HEAD(&hd->g2m_ioport_list);

    if ( !iommu_enabled )
        return 0;

    hd->platform_ops = iommu_get_ops();
    return hd->platform_ops->init(domain);
}

int iommu_add_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;

    if ( !pdev->domain )
        return -EINVAL;

    ASSERT(spin_is_locked(&pcidevs_lock));

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->add_device(pdev);
}

int iommu_remove_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;
    if ( !pdev->domain )
        return -EINVAL;

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->remove_device(pdev);
}

int assign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int rc = 0;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    spin_lock(&pcidevs_lock);
    if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
        goto done;

    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
    {
        d->need_iommu = 1;
        rc = iommu_populate_page_table(d);
        goto done;
    }
done:
    spin_unlock(&pcidevs_lock);
    return rc;
}

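/*
 * Descriptive note (added for clarity, not in the original source):
 * iommu_populate_page_table() walks the domain's page list and creates an
 * IOMMU mapping for every writable page, so that a PV domain which has just
 * gained its first passthrough device (need_iommu set above) can DMA into
 * the memory it already owns.
 */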
static int iommu_populate_page_table(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct page_info *page;
    int rc;

    spin_lock(&d->page_alloc_lock);

    page_list_for_each ( page, &d->page_list )
    {
        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            rc = hd->platform_ops->map_page(
                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
            if ( rc )
            {
                spin_unlock(&d->page_alloc_lock);
                hd->platform_ops->teardown(d);
                return rc;
            }
        }
    }
    spin_unlock(&d->page_alloc_lock);
    return 0;
}

void iommu_domain_destroy(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct list_head *ioport_list, *tmp;
    struct g2m_ioport *ioport;

    if ( !iommu_enabled || !hd->platform_ops )
        return;

    if ( !is_hvm_domain(d) && !need_iommu(d) )
        return;

    if ( need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
        return;
    }

    if ( hd )
    {
        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
        {
            ioport = list_entry(ioport_list, struct g2m_ioport, list);
            list_del(&ioport->list);
            xfree(ioport);
        }
    }

    return hd->platform_ops->teardown(d);
}

int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->map_page(d, gfn, mfn);
}

int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->unmap_page(d, gfn);
}

/* caller should hold the pcidevs_lock */
int deassign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev = NULL;

    if ( !iommu_enabled || !hd->platform_ops )
        return -EINVAL;

    ASSERT(spin_is_locked(&pcidevs_lock));
    pdev = pci_get_pdev(bus, devfn);
    if ( !pdev )
        return -ENODEV;

    if ( pdev->domain != d )
    {
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "IOMMU: deassign a device not owned\n");
        return -EINVAL;
    }

    hd->platform_ops->reassign_device(d, dom0, bus, devfn);

    if ( !has_arch_pdevs(d) && need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
    }

    return 0;
}

static int iommu_setup(void)
{
    int rc = -ENODEV;

    rc = iommu_hardware_setup();

    iommu_enabled = (rc == 0);

    if ( force_iommu && !iommu_enabled )
        panic("IOMMU setup failed, crash Xen for security purpose!\n");

    if ( !iommu_enabled )
        iommu_pv_enabled = 0;
    printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
    if ( iommu_enabled )
        printk("I/O virtualisation for PV guests %sabled\n",
               iommu_pv_enabled ? "en" : "dis");
    return rc;
}
__initcall(iommu_setup);

int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev;
    int group_id, sdev_id;
    u32 bdf;
    int i = 0;
    struct iommu_ops *ops = hd->platform_ops;

    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
        return 0;

    group_id = ops->get_device_group_id(bus, devfn);

    spin_lock(&pcidevs_lock);
    for_each_pdev( d, pdev )
    {
        if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
            continue;

        sdev_id = ops->get_device_group_id(pdev->bus, pdev->devfn);
        if ( (sdev_id == group_id) && (i < max_sdevs) )
        {
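            /*
             * Note added for clarity (not in the original source): the bus
             * number is packed into bits 23:16 and the devfn into bits 15:8
             * of the BDF value copied back to the guest buffer.
             */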
            bdf = 0;
            bdf |= (pdev->bus & 0xff) << 16;
            bdf |= (pdev->devfn & 0xff) << 8;
            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
            {
                spin_unlock(&pcidevs_lock);
                return -1;
            }
            i++;
        }
    }
    spin_unlock(&pcidevs_lock);

    return i;
}

void iommu_update_ire_from_apic(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_apic(apic, reg, value);
}
void iommu_update_ire_from_msi(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_msi(msi_desc, msg);
}

void iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iommu_ops *ops = iommu_get_ops();
    ops->read_msi_from_ire(msi_desc, msg);
}

unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
    struct iommu_ops *ops = iommu_get_ops();
    return ops->read_apic_from_ire(apic, reg);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */