ia64/xen-unstable

view xen/drivers/passthrough/amd/pci_amd_iommu.c @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral IOMMU interface. The
reason is that, as of changeset 19732, there is only one global flag
"iommu_enabled" that controls IOMMU enablement for both VT-d and AMD
systems, so we need separate code paths for VT-d and AMD IOMMU systems
when this flag is turned on. Also, the early check of "iommu_enabled"
in iommu_setup() is removed so that IOMMU functionality is not
disabled on AMD systems. (A rough sketch of this dispatch pattern
follows the changeset header below.)

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 4771bceb1889
children
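For context, here is a minimal sketch of the vendor-neutral dispatch pattern the message above describes. It is illustrative only and not part of this changeset: generic_iommu_domain_init and iommu_get_ops are hypothetical names introduced for the example, while "iommu_enabled" and struct iommu_ops (see amd_iommu_ops at the end of this file) are real.

/*
 * Illustrative sketch only -- not code from this file or changeset.
 * A vendor-neutral wrapper tests the single global "iommu_enabled"
 * flag (the sole control as of changeset 19732) and then dispatches
 * through the per-vendor ops table: amd_iommu_ops below, or the
 * VT-d equivalent.  iommu_get_ops() is a hypothetical accessor.
 */
extern int iommu_enabled;                      /* the single global flag */
extern struct iommu_ops *iommu_get_ops(void);  /* hypothetical accessor */

static int generic_iommu_domain_init(struct domain *d)
{
    if ( !iommu_enabled )
        return 0;                     /* IOMMU support globally disabled */
    return iommu_get_ops()->init(d);  /* AMD- or VT-d-specific init path */
}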
line source
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/sched.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>

extern unsigned short ivrs_bdf_entries;
extern struct ivrs_mappings *ivrs_mappings;
extern void *int_remap_table;
/* Initialise every IOMMU discovered via the ACPI IVRS table. */
int __init amd_iommu_init(void)
{
    struct amd_iommu *iommu;

    BUG_ON( !iommu_found() );

    ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();

    if ( !ivrs_bdf_entries )
        goto error_out;

    if ( amd_iommu_setup_shared_tables() != 0 )
        goto error_out;

    if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
        goto error_out;

    for_each_amd_iommu ( iommu )
        if ( amd_iommu_init_one(iommu) != 0 )
            goto error_out;

    return 0;

error_out:
    amd_iommu_init_cleanup();
    return -ENODEV;
}
/* Look up the IOMMU responsible for a given bus/devfn (BDF). */
struct amd_iommu *find_iommu_for_device(int bus, int devfn)
{
    u16 bdf = (bus << 8) | devfn;
    BUG_ON ( bdf >= ivrs_bdf_entries );
    return ivrs_mappings[bdf].iommu;
}
/* Bind a device-table entry (DTE) to the domain's I/O page tables. */
static void amd_iommu_setup_domain_device(
    struct domain *domain, struct amd_iommu *iommu, int bdf)
{
    void *dte;
    unsigned long flags;
    int req_id;
    u8 sys_mgt, dev_ex;
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );

    /* get device-table entry */
    req_id = ivrs_mappings[bdf].dte_requestor_id;
    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);

    spin_lock_irqsave(&iommu->lock, flags);

    if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
    {
        /* bind DTE to domain page-tables */
        sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
        dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;

        amd_iommu_set_dev_table_entry((u32 *)dte,
                                      page_to_maddr(hd->root_table),
                                      virt_to_maddr(int_remap_table),
                                      hd->domain_id, sys_mgt, dev_ex,
                                      hd->paging_mode);

        invalidate_dev_table_entry(iommu, req_id);
        invalidate_interrupt_table(iommu, req_id);
        flush_command_buffer(iommu);
        amd_iov_info("Enable DTE:0x%x, "
                     "root_table:%"PRIx64", interrupt_table:%"PRIx64", "
                     "domain_id:%d, paging_mode:%d\n",
                     req_id, (u64)page_to_maddr(hd->root_table),
                     (u64)virt_to_maddr(int_remap_table), hd->domain_id,
                     hd->paging_mode);
    }

    spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Scan the PCI bus and bind every present device to dom0. */
static void amd_iommu_setup_dom0_devices(struct domain *d)
{
    struct amd_iommu *iommu;
    struct pci_dev *pdev;
    int bus, dev, func;
    u32 l;
    int bdf;

    spin_lock(&pcidevs_lock);
    for ( bus = 0; bus < 256; bus++ )
    {
        for ( dev = 0; dev < 32; dev++ )
        {
            for ( func = 0; func < 8; func++ )
            {
                l = pci_conf_read32(bus, dev, func, PCI_VENDOR_ID);
                /* some broken boards return 0 or ~0 if a slot is empty: */
                if ( (l == 0xffffffff) || (l == 0x00000000) ||
                     (l == 0x0000ffff) || (l == 0xffff0000) )
                    continue;

                pdev = alloc_pdev(bus, PCI_DEVFN(dev, func));
                pdev->domain = d;
                list_add(&pdev->domain_list, &d->arch.pdev_list);

                bdf = (bus << 8) | pdev->devfn;
                /* supported device? */
                iommu = (bdf < ivrs_bdf_entries) ?
                    find_iommu_for_device(bus, pdev->devfn) : NULL;

                if ( iommu )
                    amd_iommu_setup_domain_device(d, iommu, bdf);
            }
        }
    }
    spin_unlock(&pcidevs_lock);
}
int amd_iov_detect(void)
{
    INIT_LIST_HEAD(&amd_iommu_head);

    if ( amd_iommu_detect_acpi() != 0 )
    {
        amd_iov_error("Error detection\n");
        return -ENODEV;
    }

    if ( !iommu_found() )
    {
        printk("AMD_IOV: IOMMU not found!\n");
        return -ENODEV;
    }

    if ( amd_iommu_init() != 0 )
    {
        amd_iov_error("Error initialization\n");
        return -ENODEV;
    }
    return 0;
}
static int allocate_domain_resources(struct hvm_iommu *hd)
{
    /* allocate root table */
    spin_lock(&hd->mapping_lock);
    if ( !hd->root_table )
    {
        hd->root_table = alloc_amd_iommu_pgtable();
        if ( !hd->root_table )
        {
            spin_unlock(&hd->mapping_lock);
            return -ENOMEM;
        }
    }
    spin_unlock(&hd->mapping_lock);
    return 0;
}
/* Return the number of page-table levels needed to map 'entries' frames. */
static int get_paging_mode(unsigned long entries)
{
    int level = 1;

    BUG_ON(!max_page);

    if ( entries > max_page )
        entries = max_page;

    while ( entries > PTE_PER_TABLE_SIZE )
    {
        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
        if ( ++level > 6 )
            return -ENOMEM;
    }

    return level;
}
static int amd_iommu_domain_init(struct domain *domain)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    /* allocate page directory */
    if ( allocate_domain_resources(hd) != 0 )
    {
        if ( hd->root_table )
            free_domheap_page(hd->root_table);
        return -ENOMEM;
    }

    hd->paging_mode = is_hvm_domain(domain) ?
        IOMMU_PAGE_TABLE_LEVEL_4 : get_paging_mode(max_page);

    if ( domain->domain_id == 0 )
    {
        unsigned long i;
        /* setup 1:1 page table for dom0 */
        for ( i = 0; i < max_page; i++ )
            amd_iommu_map_page(domain, i, i);

        amd_iommu_setup_dom0_devices(domain);
    }

    hd->domain_id = domain->domain_id;

    return 0;
}
static void amd_iommu_disable_domain_device(
    struct domain *domain, struct amd_iommu *iommu, int bdf)
{
    void *dte;
    unsigned long flags;
    int req_id;

    req_id = ivrs_mappings[bdf].dte_requestor_id;
    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);

    spin_lock_irqsave(&iommu->lock, flags);
    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
    {
        memset(dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
        invalidate_dev_table_entry(iommu, req_id);
        flush_command_buffer(iommu);
        amd_iov_info("Disable DTE:0x%x,"
                     " domain_id:%d, paging_mode:%d\n",
                     req_id, domain_hvm_iommu(domain)->domain_id,
                     domain_hvm_iommu(domain)->paging_mode);
    }
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static int reassign_device( struct domain *source, struct domain *target,
                            u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    struct amd_iommu *iommu;
    int bdf;

    ASSERT(spin_is_locked(&pcidevs_lock));
    pdev = pci_get_pdev_by_domain(source, bus, devfn);
    if ( !pdev )
        return -ENODEV;

    bdf = (bus << 8) | devfn;
    /* supported device? */
    iommu = (bdf < ivrs_bdf_entries) ?
        find_iommu_for_device(bus, pdev->devfn) : NULL;

    if ( !iommu )
    {
        amd_iov_error("Fail to find iommu."
                      " %x:%x.%x cannot be assigned to domain %d\n",
                      bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
        return -ENODEV;
    }

    amd_iommu_disable_domain_device(source, iommu, bdf);

    list_move(&pdev->domain_list, &target->arch.pdev_list);
    pdev->domain = target;

    amd_iommu_setup_domain_device(target, iommu, bdf);
    amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                 source->domain_id, target->domain_id);

    return 0;
}
/* Assign a device from dom0 to domain d, honouring any unity-mapped ranges. */
static int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
    int bdf = (bus << 8) | devfn;
    int req_id = ivrs_mappings[bdf].dte_requestor_id;

    amd_iommu_sync_p2m(d);

    if ( ivrs_mappings[req_id].unity_map_enable )
    {
        amd_iommu_reserve_domain_unity_map(
            d,
            ivrs_mappings[req_id].addr_range_start,
            ivrs_mappings[req_id].addr_range_length,
            ivrs_mappings[req_id].write_permission,
            ivrs_mappings[req_id].read_permission);
    }

    return reassign_device(dom0, d, bus, devfn);
}
/* Recursively free an I/O page-table subtree rooted at 'pg'. */
static void deallocate_next_page_table(struct page_info* pg, int level)
{
    void *table_vaddr, *pde;
    u64 next_table_maddr;
    int index;

    table_vaddr = map_domain_page(page_to_mfn(pg));

    if ( level > 1 )
    {
        for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
        {
            pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
            next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
            if ( next_table_maddr != 0 )
            {
                deallocate_next_page_table(
                    maddr_to_page(next_table_maddr), level - 1);
            }
        }
    }

    unmap_domain_page(table_vaddr);
    free_amd_iommu_pgtable(pg);
}
static void deallocate_iommu_page_tables(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    spin_lock(&hd->mapping_lock);
    if ( hd->root_table )
    {
        deallocate_next_page_table(hd->root_table, hd->paging_mode);
        hd->root_table = NULL;
    }
    spin_unlock(&hd->mapping_lock);
}

static void amd_iommu_domain_destroy(struct domain *d)
{
    deallocate_iommu_page_tables(d);
    invalidate_all_iommu_pages(d);
}

static int amd_iommu_return_device(
    struct domain *s, struct domain *t, u8 bus, u8 devfn)
{
    return reassign_device(s, t, bus, devfn);
}
static int amd_iommu_add_device(struct pci_dev *pdev)
{
    struct amd_iommu *iommu;
    u16 bdf;
    if ( !pdev->domain )
        return -EINVAL;

    bdf = (pdev->bus << 8) | pdev->devfn;
    iommu = (bdf < ivrs_bdf_entries) ?
        find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;

    if ( !iommu )
    {
        amd_iov_error("Fail to find iommu."
                      " %x:%x.%x cannot be assigned to domain %d\n",
                      pdev->bus, PCI_SLOT(pdev->devfn),
                      PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
        return -ENODEV;
    }

    amd_iommu_setup_domain_device(pdev->domain, iommu, bdf);
    return 0;
}

static int amd_iommu_remove_device(struct pci_dev *pdev)
{
    struct amd_iommu *iommu;
    u16 bdf;
    if ( !pdev->domain )
        return -EINVAL;

    bdf = (pdev->bus << 8) | pdev->devfn;
    iommu = (bdf < ivrs_bdf_entries) ?
        find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;

    if ( !iommu )
    {
        amd_iov_error("Fail to find iommu."
                      " %x:%x.%x cannot be removed from domain %d\n",
                      pdev->bus, PCI_SLOT(pdev->devfn),
                      PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
        return -ENODEV;
    }

    amd_iommu_disable_domain_device(pdev->domain, iommu, bdf);
    return 0;
}
/* Devices sharing a DTE requestor ID belong to the same assignment group. */
static int amd_iommu_group_id(u8 bus, u8 devfn)
{
    int rt;
    int bdf = (bus << 8) | devfn;
    rt = ( bdf < ivrs_bdf_entries ) ?
        ivrs_mappings[bdf].dte_requestor_id :
        bdf;
    return rt;
}

struct iommu_ops amd_iommu_ops = {
    .init = amd_iommu_domain_init,
    .add_device = amd_iommu_add_device,
    .remove_device = amd_iommu_remove_device,
    .assign_device  = amd_iommu_assign_device,
    .teardown = amd_iommu_domain_destroy,
    .map_page = amd_iommu_map_page,
    .unmap_page = amd_iommu_unmap_page,
    .reassign_device = amd_iommu_return_device,
    .get_device_group_id = amd_iommu_group_id,
    .update_ire_from_apic = amd_iommu_ioapic_update_ire,
    .update_ire_from_msi = amd_iommu_msi_msg_update_ire,
    .read_apic_from_ire = amd_iommu_read_ioapic_from_ire,
    .read_msi_from_ire = amd_iommu_read_msi_from_ire,
};