ia64/xen-unstable

xen/drivers/passthrough/vtd/intremap.c @ 19602:40d4267296ad

vt-d: Fix interrupt remapping for multiple IOAPICs

The current IOAPIC interrupt remapping code assumes there is only one
IOAPIC in the system, which causes problems when more than one is
present. This patch extends the ioapic_pin_to_intremap_index[] array
to handle the multiple-IOAPIC case.
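
In the new scheme each array slot is a list head, and per-IOAPIC entries
hang off the slot for their pin. Below is a minimal userspace sketch of
the lookup idea only: a plain singly-linked list stands in for Xen's
<xen/list.h>, and the IOAPIC IDs, pins, and indices are made-up
illustration values, not anything from the patch itself.

#include <stdio.h>
#include <stdlib.h>

#define MAX_PIN 256

struct map_entry {
    unsigned int ioapic_id;
    int intremap_index;
    struct map_entry *next;
};

static struct map_entry *pin_to_index[MAX_PIN];

/* Look up the remap-table index for (ioapic_id, pin); -1 if unset. */
static int get_index(unsigned int ioapic_id, unsigned int pin)
{
    struct map_entry *e;

    for ( e = pin_to_index[pin]; e; e = e->next )
        if ( e->ioapic_id == ioapic_id )
            return e->intremap_index;
    return -1;
}

/* Record the index allocated for (ioapic_id, pin). */
static void set_index(unsigned int ioapic_id, unsigned int pin, int index)
{
    struct map_entry *e = malloc(sizeof(*e));

    if ( !e )
        exit(EXIT_FAILURE);
    e->ioapic_id = ioapic_id;
    e->intremap_index = index;
    e->next = pin_to_index[pin];
    pin_to_index[pin] = e;
}

int main(void)
{
    /* Two IOAPICs both wired to pin 4: a flat pin-indexed array would
     * let the second allocation clobber the first; the per-pin list
     * keeps them distinct. */
    set_index(1, 4, 0);
    set_index(2, 4, 1);
    printf("IOAPIC 1, pin 4 -> index %d\n", get_index(1, 4)); /* 0 */
    printf("IOAPIC 2, pin 4 -> index %d\n", get_index(2, 4)); /* 1 */
    return 0;
}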

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 15 08:12:39 2009 +0100 (2009-05-15)
parents 115c164721dc
children 07cf79dfb59c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/time.h>
#include <xen/list.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

#ifndef dest_SMI
#define dest_SMI -1
#endif

/*
 * The maximum number of IOAPIC (or IOSAPIC) pins. Typical values are 24 or
 * 48 on x86 and Itanium platforms. We use the bigger number 256 here, which
 * should be big enough; IREMAP_ENTRY_NR is currently also 256.
 */
#define MAX_IOAPIC_PIN_NUM 256
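
/*
 * Per-pin buckets mapping (IOAPIC ID, pin) to an interrupt remapping
 * table index, so that multiple IOAPICs can share the same pin number
 * without clobbering each other's entry.
 */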
struct ioapicid_pin_intremap_index {
    struct list_head list;
    unsigned int ioapic_id;
    unsigned int pin;
    int intremap_index;
};

static struct list_head ioapic_pin_to_intremap_index[MAX_IOAPIC_PIN_NUM];

static int init_ioapic_pin_intremap_index(void)
{
    static int initialized = 0;
    int i;

    if ( initialized == 1 )
        return 0;

    for ( i = 0; i < MAX_IOAPIC_PIN_NUM; i++ )
        INIT_LIST_HEAD(&ioapic_pin_to_intremap_index[i]);

    initialized = 1;
    return 0;
}

static int get_ioapic_pin_intremap_index(unsigned int ioapic_id,
                                         unsigned int pin)
{
    struct ioapicid_pin_intremap_index *entry;
    struct list_head *pos, *tmp;

    list_for_each_safe ( pos, tmp, &ioapic_pin_to_intremap_index[pin] )
    {
        entry = list_entry(pos, struct ioapicid_pin_intremap_index, list);
        if ( entry->ioapic_id == ioapic_id )
            return entry->intremap_index;
    }

    return -1;
}

static int set_ioapic_pin_intremap_index(unsigned int ioapic_id,
                                         unsigned int pin,
                                         int index)
{
    struct ioapicid_pin_intremap_index *entry;

    entry = xmalloc(struct ioapicid_pin_intremap_index);
    if ( !entry )
        return -ENOMEM;

    entry->ioapic_id = ioapic_id;
    entry->pin = pin;
    entry->intremap_index = index;

    list_add_tail(&entry->list, &ioapic_pin_to_intremap_index[pin]);

    return 0;
}

u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}
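
/*
 * Translate a remap-format IOAPIC RTE back to xAPIC format by reading
 * the vector and delivery fields out of the interrupt remapping table.
 */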
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctrl is not ready\n");
        return -EFAULT;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
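    /* The 16-bit table index is stored split across the RTE:
     * bit 15 in index_15, bits 0-14 in index_0_14. */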
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
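
/*
 * Fill (or reuse) the remap table entry allocated for this (IOAPIC, pin),
 * then rewrite the RTE in remap format so that it references that entry.
 */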
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic_id, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    index = get_ioapic_pin_intremap_index(apic_id, ioapic_pin);
    if ( index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        set_ioapic_pin_intremap_index(apic_id, ioapic_pin, index);
    }

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        new_ire.lo.dst = (value >> 24) << 8;
#else /* __ia64__ */
        new_ire.lo.dst = value >> 16;
#endif
    }
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(apic_id);

        new_ire.hi.sq = 0;    /* compare all 16 bits of SID */
        new_ire.hi.svt = 1;   /* verify requester ID using SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct the new IOAPIC RTE in remap format */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
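
/*
 * Intercepted IOAPIC register read: when the underlying RTE is in remap
 * format, return the architectural RTE reconstructed from the remapping
 * table; otherwise pass the raw register value through.
 */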
unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu ? iommu_ir_ctrl(iommu) : NULL;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read lower and upper 32 bits of the RTE */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( (remap_rte->format == 0) || (old_rte.delivery_mode == dest_SMI) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}
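
/*
 * Intercepted IOAPIC register write: update the interrupt remapping table
 * for this (IOAPIC, pin) and write the RTE back in remap format. SMI RTEs
 * are passed through without remapping.
 */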
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    unsigned int ioapic_pin = (reg - 0x10) / 2;
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu ? iommu_ir_ctrl(iommu) : NULL;
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both lower and upper 32 bits of the RTE */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( old_rte.delivery_mode == dest_SMI )
    {
        /* Some BIOSes do not zero out reserved fields in IOAPIC
         * RTEs. clear_IO_APIC() zeroes out all RTEs except those
         * with the SMI delivery type. This is a problem when the host
         * OS converts the SMI delivery type to some other type but
         * leaves the reserved field uninitialized. It can cause an
         * out-of-bounds access to the interrupt remapping table if the
         * "format" field is 1 and the "index" field is larger than the
         * table's maximum index.
         */
        if ( remap_rte->format == 1 )
        {
            remap_rte->format = 0;
            *IO_APIC_BASE(apic) = reg;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
            *IO_APIC_BASE(apic) = reg + 1;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
        }

        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    ASSERT(ioapic_pin < MAX_IOAPIC_PIN_NUM);
    if ( ioapic_rte_to_remap_entry(iommu, IO_APIC_ID(apic), ioapic_pin,
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write the new entry to the IOAPIC */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}

#if defined(__i386__) || defined(__x86_64__)
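/*
 * Reconstruct the architectural MSI address/data fields from the remapping
 * table entry referenced by a remap-format MSI message.
 */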
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctrl == NULL\n");
        return -EFAULT;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
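
/*
 * Fill (or reuse) the remap table entry tracked in msi_desc->remap_index,
 * program it from the MSI message, and rewrite the message in remap format.
 */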
static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct the new MSI/MSI-X RTE */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
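
/*
 * MSI read/write hooks: locate the IOMMU covering the device and
 * translate its message from or to remap format.
 */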
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    if ( !iommu )
        return;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    if ( !iommu )
        return;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
#elif defined(__ia64__)
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
#endif
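
/*
 * Enable interrupt remapping on one IOMMU: allocate the remapping table
 * on first use, program IRTA, then set SIRTP, CFI and IRE in turn,
 * waiting for each to be acknowledged in the global status register.
 */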
int enable_intremap(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    s_time_t start_time;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENOMEM;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    iommu->gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* make sure the hardware has completed it */
    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot set SIRTP field for interrupt remapping\n");
        cpu_relax();
    }

    /* enable compatibility format interrupt pass-through */
    iommu->gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot set CFI field for interrupt remapping\n");
        cpu_relax();
    }

    /* enable the interrupt remapping hardware */
    iommu->gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot set IRE field for interrupt remapping\n");
        cpu_relax();
    }

    /* after setting SIRTP, we must globally invalidate the IEC */
    iommu_flush_iec_global(iommu);

    init_ioapic_pin_intremap_index();

    return 0;
}
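
/*
 * Disable interrupt remapping on one IOMMU: clear SIRTP, CFI and IRE and
 * wait until the hardware reports remapping disabled.
 */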
void disable_intremap(struct iommu *iommu)
{
    s_time_t start_time;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    iommu->gcmd &= ~(DMA_GCMD_SIRTP | DMA_GCMD_CFI | DMA_GCMD_IRE);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot clear IRE field for interrupt remapping\n");
        cpu_relax();
    }
}