ia64/xen-unstable

view xen/drivers/passthrough/iommu.c @ 18650:609d0d34450f

vtd: code cleanup

Remove iommu_page_mapping/unmapping, which are redundant because
intel_iommu_map_page/unmap_page already provide the same functionality.

Correct IRTA_REG_EIMI_SHIFT to IRTA_REG_EIME_SHIFT.

Also remove unused declarations in iommu.c.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Oct 17 12:04:11 2008 +0100 (2008-10-17)
parents 3ff2461bb5a1
children 2941b1a97c60
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>

static void parse_iommu_param(char *s);
static int iommu_populate_page_table(struct domain *d);
int intel_vtd_setup(void);
int amd_iov_detect(void);

/*
 * The 'iommu' parameter enables the IOMMU. Optional comma separated
 * value may contain:
 *
 *   off|no|false|disable   Disable IOMMU (default)
 *   pv                     Enable IOMMU for PV domains
 *   no-pv                  Disable IOMMU for PV domains (default)
 *   force|required         Don't boot unless IOMMU is enabled
 *   passthrough            Bypass VT-d translation for Dom0
 */
custom_param("iommu", parse_iommu_param);
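/*
 * Example usage (illustrative, not part of the original file): a Xen boot
 * line containing "iommu=pv,passthrough" enables the IOMMU for PV domains
 * and bypasses VT-d translation for Dom0, while "iommu=force" panics at
 * boot if IOMMU hardware setup fails; see parse_iommu_param() and
 * iommu_setup() below.
 */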
int iommu_enabled = 0;
int iommu_pv_enabled = 0;
int force_iommu = 0;
int iommu_passthrough = 0;

static void __init parse_iommu_param(char *s)
{
    char *ss;
    iommu_enabled = 1;

    do {
        ss = strchr(s, ',');
        if ( ss )
            *ss = '\0';

        if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
             !strcmp(s, "0") || !strcmp(s, "disable") )
            iommu_enabled = 0;
        else if ( !strcmp(s, "pv") )
            iommu_pv_enabled = 1;
        else if ( !strcmp(s, "no-pv") )
            iommu_pv_enabled = 0;
        else if ( !strcmp(s, "force") || !strcmp(s, "required") )
            force_iommu = 1;
        else if ( !strcmp(s, "passthrough") )
            iommu_passthrough = 1;

        s = ss + 1;
    } while ( ss );
}

int iommu_domain_init(struct domain *domain)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    spin_lock_init(&hd->mapping_lock);
    INIT_LIST_HEAD(&hd->g2m_ioport_list);

    if ( !iommu_enabled )
        return 0;

    hd->platform_ops = iommu_get_ops();
    return hd->platform_ops->init(domain);
}

int iommu_add_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;
    if ( !pdev->domain )
        return -EINVAL;

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->add_device(pdev);
}

int iommu_remove_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;
    if ( !pdev->domain )
        return -EINVAL;

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->remove_device(pdev);
}

int assign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
        return rc;

    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
    {
        d->need_iommu = 1;
        return iommu_populate_page_table(d);
    }
    return 0;
}
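/*
 * Map every writable page of the domain (gfn -> mfn) into the IOMMU page
 * table via the vendor driver's map_page hook; on failure, tear down the
 * partially built table and return the error.
 */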
static int iommu_populate_page_table(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct page_info *page;
    int rc;

    spin_lock(&d->page_alloc_lock);

    list_for_each_entry ( page, &d->page_list, list )
    {
        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            rc = hd->platform_ops->map_page(
                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
            if ( rc )
            {
                spin_unlock(&d->page_alloc_lock);
                hd->platform_ops->teardown(d);
                return rc;
            }
        }
    }
    spin_unlock(&d->page_alloc_lock);
    return 0;
}
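/*
 * Tear down per-domain IOMMU state: clear the need_iommu flag if set, free
 * any guest-to-machine I/O port mappings of an HVM domain, and call the
 * vendor driver's teardown hook.
 */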
void iommu_domain_destroy(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct list_head *ioport_list, *tmp;
    struct g2m_ioport *ioport;

    if ( !iommu_enabled || !hd->platform_ops )
        return;

    if ( !is_hvm_domain(d) && !need_iommu(d) )
        return;

    if ( need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
        return;
    }

    if ( hd )
    {
        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
        {
            ioport = list_entry(ioport_list, struct g2m_ioport, list);
            list_del(&ioport->list);
            xfree(ioport);
        }
    }

    return hd->platform_ops->teardown(d);
}

int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->map_page(d, gfn, mfn);
}

int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->unmap_page(d, gfn);
}

void deassign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return;

    hd->platform_ops->reassign_device(d, dom0, bus, devfn);

    if ( !has_arch_pdevs(d) && need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
    }
}

static int iommu_setup(void)
{
    int rc = -ENODEV;

    if ( !iommu_enabled )
        goto out;

    rc = iommu_hardware_setup();

    iommu_enabled = (rc == 0);

 out:
    if ( force_iommu && !iommu_enabled )
        panic("IOMMU setup failed, crash Xen for security purpose!\n");

    if ( !iommu_enabled )
        iommu_pv_enabled = 0;
    printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
    if ( iommu_enabled )
        printk("I/O virtualisation for PV guests %sabled\n",
               iommu_pv_enabled ? "en" : "dis");
    return rc;
}
__initcall(iommu_setup);
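/*
 * Fill 'buf' with the BDFs of up to max_sdevs devices owned by 'd' that
 * share a device group with (bus, devfn); returns the number of entries
 * written, or -1 if copying to the guest buffer fails.
 */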
int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev;
    int group_id, sdev_id;
    u32 bdf;
    int i = 0;
    struct iommu_ops *ops = hd->platform_ops;

    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
        return 0;

    group_id = ops->get_device_group_id(bus, devfn);

    read_lock(&pcidevs_lock);
    for_each_pdev( d, pdev )
    {
        if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
            continue;

        sdev_id = ops->get_device_group_id(pdev->bus, pdev->devfn);
        if ( (sdev_id == group_id) && (i < max_sdevs) )
        {
            bdf = 0;
            bdf |= (pdev->bus & 0xff) << 16;
            bdf |= (pdev->devfn & 0xff) << 8;
            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
            {
                read_unlock(&pcidevs_lock);
                return -1;
            }
            i++;
        }
    }
    read_unlock(&pcidevs_lock);

    return i;
}

void iommu_update_ire_from_apic(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_apic(apic, reg, value);
}
void iommu_update_ire_from_msi(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_msi(msi_desc, msg);
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */