ia64/xen-unstable

xen/drivers/passthrough/amd/iommu_init.c @ 19033:73770182aee4

AMD IOMMU: Reset tail and head pointers of cmd buffer and event log

Reset the tail and head pointers of the command buffer and event log
to zero, in case the IOMMU does not reset them itself after the base
addresses of those buffers are updated.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 13 15:16:46 2009 +0000 (2009-01-13)
parents 1dfc48a8c361
children ab514cfbcdc5
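
The functional change is confined to set_iommu_command_buffer_control() and
set_iommu_event_log_control() in the listing below: after each ring is
(re-)enabled, its head and tail pointer registers are cleared explicitly
rather than relying on the hardware to do so when the base address registers
are rewritten. Excerpted from those two functions:

    /* reset head and tail pointer */
    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);

    /* reset head and tail pointer */
    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);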
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/msi.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
static struct amd_iommu *vector_to_iommu[NR_VECTORS];
static int nr_amd_iommus;
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;

unsigned short ivrs_bdf_entries;
struct ivrs_mappings *ivrs_mappings;
struct list_head amd_iommu_head;
struct table_struct device_table;

extern void *int_remap_table;
extern spinlock_t int_remap_table_lock;
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
    unsigned long mfn;

    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
    {
        amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
        return -ENOMEM;
    }

    iommu->mmio_base = (void *)fix_to_virt(
        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);

    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);

    return 0;
}
static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
{
    if ( iommu->mmio_base )
    {
        iounmap(iommu->mmio_base);
        iommu->mmio_base = NULL;
    }
}
static void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
}
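
/*
 * The high half of the command buffer base register also carries a length
 * field: log2 of the number of entries, computed below as the page order of
 * the allocation plus IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE (the
 * number of entries that fit in one 4K page, expressed as a power of two).
 */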
static void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
                        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_CMD_BUFFER_LENGTH_MASK,
                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
}
static void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
                        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EVENT_LOG_BASE_HIGH_MASK,
                         IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_EVENT_LOG_LENGTH_MASK,
                         IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
}
static void __init set_iommu_translation_control(struct amd_iommu *iommu,
                                                 int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    if ( enable )
    {
        set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
        set_field_in_reg_u32(iommu->isochronous ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_ISOCHRONOUS_MASK,
                             IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
        set_field_in_reg_u32(iommu->coherent ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_COHERENT_MASK,
                             IOMMU_CONTROL_COHERENT_SHIFT, &entry);
        set_field_in_reg_u32(iommu->res_pass_pw ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
        /* do not set PassPW bit */
        set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
    }
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                                    int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    /* reset head and tail pointer */
    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
}
static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
{
    u64 addr_lo, addr_hi;
    u32 entry;

    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_limit >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);

    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_base >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_BASE_LOW_MASK,
                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);

    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);

    set_field_in_reg_u32(iommu->exclusion_enable, entry,
                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_LOW_OFFSET);
}
static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
                                               int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    /* reset head and tail pointer */
    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
}
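
/*
 * The event log is a ring buffer: hardware appends entries and advances the
 * tail register, while software consumes entries at event_log_head and then
 * writes the new head back. The log is empty when head equals tail, in which
 * case -EFAULT is returned.
 */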
static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
{
    u32 tail, head, *event_log;
    int i;

    BUG_ON( !iommu || !event );

    /* make sure there's an entry in the log */
    tail = get_field_from_reg_u32(
        readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET),
        IOMMU_EVENT_LOG_TAIL_MASK,
        IOMMU_EVENT_LOG_TAIL_SHIFT);
    if ( tail != iommu->event_log_head )
    {
        /* read event log entry */
        event_log = (u32 *)(iommu->event_log.buffer +
                            (iommu->event_log_head *
                             IOMMU_EVENT_LOG_ENTRY_SIZE));
        for ( i = 0; i < IOMMU_EVENT_LOG_U32_PER_ENTRY; i++ )
            event[i] = event_log[i];
        if ( ++iommu->event_log_head == iommu->event_log.entries )
            iommu->event_log_head = 0;

        /* update head pointer */
        set_field_in_reg_u32(iommu->event_log_head, 0,
                             IOMMU_EVENT_LOG_HEAD_MASK,
                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
        return 0;
    }

    return -EFAULT;
}
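
/*
 * Event log interrupts are delivered through the IOMMU's PCI MSI capability.
 * The helpers below program the MSI data and address registers and toggle
 * the MSI enable bit directly via PCI config space.
 */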
static void amd_iommu_msi_data_init(struct amd_iommu *iommu)
{
    u32 msi_data;
    u8 bus = (iommu->bdf >> 8) & 0xff;
    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
    u8 func = PCI_FUNC(iommu->bdf & 0xff);
    int vector = iommu->vector;

    msi_data = MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        MSI_DATA_DELIVERY_FIXED |
        MSI_DATA_VECTOR(vector);

    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
}
static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
{
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    u32 address_hi = 0;
    u32 address_lo = MSI_ADDR_HEADER |
        MSI_ADDR_DESTMODE_PHYS |
        MSI_ADDR_REDIRECTION_CPU |
        MSI_ADDR_DEST_ID(phy_cpu);

    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
}
static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
{
    u16 control;
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    control = pci_conf_read16(bus, dev, func,
                              iommu->msi_cap + PCI_MSI_FLAGS);
    control &= ~(1);
    if ( flag )
        control |= flag;
    pci_conf_write16(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_FLAGS, control);
}
static void iommu_msi_unmask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void iommu_msi_mask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static unsigned int iommu_msi_startup(unsigned int vector)
{
    iommu_msi_unmask(vector);
    return 0;
}

static void iommu_msi_end(unsigned int vector)
{
    iommu_msi_unmask(vector);
    ack_APIC_irq();
}

static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
{
    struct amd_iommu *iommu = vector_to_iommu[vector];
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
}

static struct hw_interrupt_type iommu_msi_type = {
    .typename = "AMD_IOV_MSI",
    .startup = iommu_msi_startup,
    .shutdown = iommu_msi_mask,
    .enable = iommu_msi_unmask,
    .disable = iommu_msi_mask,
    .ack = iommu_msi_mask,
    .end = iommu_msi_end,
    .set_affinity = iommu_msi_set_affinity,
};
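
/*
 * Decode an event log entry. event_str[] is indexed by (event code - 1);
 * codes outside the known range are rejected, and only I/O page fault
 * events are printed in detail.
 */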
static void parse_event_log_entry(u32 entry[])
{
    u16 domain_id, device_id;
    u32 code;
    u64 *addr;
    char * event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
                          "IO_PAGE_FALT",
                          "DEV_TABLE_HW_ERROR",
                          "PAGE_TABLE_HW_ERROR",
                          "ILLEGAL_COMMAND_ERROR",
                          "COMMAND_HW_ERROR",
                          "IOTLB_INV_TIMEOUT",
                          "INVALID_DEV_REQUEST"};

    code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK,
                                  IOMMU_EVENT_CODE_SHIFT);

    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
         (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
    {
        amd_iov_error("Invalid event log entry!\n");
        return;
    }

    if ( code == IOMMU_EVENT_IO_PAGE_FALT )
    {
        device_id = get_field_from_reg_u32(entry[0],
                                           IOMMU_EVENT_DEVICE_ID_MASK,
                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
        domain_id = get_field_from_reg_u32(entry[1],
                                           IOMMU_EVENT_DOMAIN_ID_MASK,
                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
        addr = (u64*)(entry + 2);
        printk(XENLOG_ERR "AMD_IOV: "
               "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
               event_str[code-1], domain_id, device_id, *addr);
    }
}
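
/*
 * MSI handler: read one entry from the event log, clear the event log
 * interrupt status bit, and decode the entry if one was read.
 */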
static void amd_iommu_page_fault(int vector, void *dev_id,
                                 struct cpu_user_regs *regs)
{
    u32 event[4];
    u32 entry;
    unsigned long flags;
    int ret = 0;
    struct amd_iommu *iommu = dev_id;

    spin_lock_irqsave(&iommu->lock, flags);
    ret = amd_iommu_read_event_log(iommu, event);
    /* reset interrupt status bit */
    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_STATUS_EVENT_LOG_INT_MASK,
                         IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if ( ret != 0 )
        return;
    parse_event_log_entry(event);
}
static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
{
    int vector, ret;

    vector = assign_irq_vector(AUTO_ASSIGN);
    vector_to_iommu[vector] = iommu;

    /* make irq == vector */
    irq_vector[vector] = vector;
    vector_irq[vector] = vector;

    if ( !vector )
    {
        amd_iov_error("no vectors\n");
        return 0;
    }

    irq_desc[vector].handler = &iommu_msi_type;
    ret = request_irq(vector, amd_iommu_page_fault, 0, "amd_iommu", iommu);
    if ( ret )
    {
        amd_iov_error("can't request irq\n");
        return 0;
    }
    iommu->vector = vector;
    return vector;
}
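
/*
 * Bring-up sequence for one IOMMU: point the hardware at the shared device
 * table and the per-IOMMU command buffer, event log and exclusion range,
 * program MSI, then enable the command buffer, event log and translation,
 * in that order.
 */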
void __init enable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    iommu->dev_table.alloc_size = device_table.alloc_size;
    iommu->dev_table.entries = device_table.entries;
    iommu->dev_table.buffer = device_table.buffer;

    register_iommu_dev_table_in_mmio_space(iommu);
    register_iommu_cmd_buffer_in_mmio_space(iommu);
    register_iommu_event_log_in_mmio_space(iommu);
    register_iommu_exclusion_range(iommu);

    amd_iommu_msi_data_init(iommu);
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);

    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);

    printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus);
    nr_amd_iommus++;

    iommu->enabled = 1;
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static void __init deallocate_iommu_table_struct(
    struct table_struct *table)
{
    int order = 0;
    if ( table->buffer )
    {
        order = get_order_from_bytes(table->alloc_size);
        __free_amd_iommu_tables(table->buffer, order);
        table->buffer = NULL;
    }
}

static void __init deallocate_iommu_tables(struct amd_iommu *iommu)
{
    deallocate_iommu_table_struct(&iommu->cmd_buffer);
    deallocate_iommu_table_struct(&iommu->event_log);
}

static int __init allocate_iommu_table_struct(struct table_struct *table,
                                              const char *name)
{
    int order = 0;
    if ( table->buffer == NULL )
    {
        order = get_order_from_bytes(table->alloc_size);
        table->buffer = __alloc_amd_iommu_tables(order);

        if ( table->buffer == NULL )
        {
            amd_iov_error("Error allocating %s\n", name);
            return -ENOMEM;
        }
        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
    }
    return 0;
}
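
/*
 * The command buffer and event log are rounded up to a power-of-two number
 * of 4K pages, matching the power-of-two length encoding written into their
 * base address registers above.
 */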
static int __init allocate_iommu_tables(struct amd_iommu *iommu)
{
    /* allocate 'command buffer' in power of 2 increments of 4K */
    iommu->cmd_buffer_tail = 0;
    iommu->cmd_buffer.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(amd_iommu_cmd_buffer_entries * IOMMU_CMD_BUFFER_ENTRY_SIZE));
    iommu->cmd_buffer.entries =
        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
        goto error_out;

    /* allocate 'event log' in power of 2 increments of 4K */
    iommu->event_log_head = 0;
    iommu->event_log.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(amd_iommu_event_log_entries * IOMMU_EVENT_LOG_ENTRY_SIZE));
    iommu->event_log.entries =
        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_iommu_tables(iommu);
    return -ENOMEM;
}
int __init amd_iommu_init_one(struct amd_iommu *iommu)
{
    if ( allocate_iommu_tables(iommu) != 0 )
        goto error_out;

    if ( map_iommu_mmio_region(iommu) != 0 )
        goto error_out;

    if ( set_iommu_interrupt_handler(iommu) == 0 )
        goto error_out;

    enable_iommu(iommu);
    return 0;

 error_out:
    return -ENODEV;
}

void __init amd_iommu_init_cleanup(void)
{
    struct amd_iommu *iommu, *next;

    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
    {
        list_del(&iommu->list);
        if ( iommu->enabled )
        {
            deallocate_iommu_tables(iommu);
            unmap_iommu_mmio_region(iommu);
        }
        xfree(iommu);
    }
}
static int __init init_ivrs_mapping(void)
{
    int bdf;

    BUG_ON( !ivrs_bdf_entries );

    ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
    if ( ivrs_mappings == NULL )
    {
        amd_iov_error("Error allocating IVRS Mappings table\n");
        return -ENOMEM;
    }
    memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));

    /* assign default values for device entries */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        ivrs_mappings[bdf].dte_requestor_id = bdf;
        ivrs_mappings[bdf].dte_sys_mgt_enable =
            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].iommu = NULL;
    }
    return 0;
}
static int __init amd_iommu_setup_device_table(void)
{
    /* allocate 'device table' on a 4K boundary */
    device_table.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(ivrs_bdf_entries * IOMMU_DEV_TABLE_ENTRY_SIZE));
    device_table.entries = device_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;

    return ( allocate_iommu_table_struct(&device_table, "Device Table") );
}

int __init amd_iommu_setup_shared_tables(void)
{
    BUG_ON( !ivrs_bdf_entries );

    if ( init_ivrs_mapping() != 0 )
        goto error_out;

    if ( amd_iommu_setup_device_table() != 0 )
        goto error_out;

    if ( amd_iommu_setup_intremap_table() != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_intremap_table();
    deallocate_iommu_table_struct(&device_table);

    if ( ivrs_mappings )
    {
        xfree(ivrs_mappings);
        ivrs_mappings = NULL;
    }
    return -ENOMEM;
}