ia64/xen-unstable

view xen/drivers/passthrough/vtd/intremap.c @ 19752:fa51db0871e1

vtd: Fix flush for SRTP and SIRTP set

The SRTP (Set Root Table Pointer) operation must be performed before
enabling or re-enabling DMA remapping, and after it is set, software
must globally invalidate the context-cache and then globally
invalidate the IOTLB. This is required to ensure hardware uses only
the remapping structures referenced by the new root-table pointer,
and not stale cached entries. Similarly, the SIRTP (Set Interrupt
Remap Table Pointer) operation must be performed before enabling or
re-enabling interrupt remapping, and after it is set, software must
globally invalidate the interrupt entry cache. This patch adds a
global context-cache and IOTLB flush after setting the root entry,
and globally flushes the interrupt entry cache before enabling
interrupt remapping. It also removes the iommu_flush_all in
iommu_resume, because it becomes redundant once init_vtd_hw flushes
after SRTP.

Signed-off-by: Weidong Han <weidong.han@intel.com>
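
As an illustration of the SRTP requirement described above, a minimal sketch
of the sequence follows. It is an assumption-laden outline rather than the
literal init_vtd_hw/iommu_set_root_entry code of this changeset: the helper
name set_root_entry_and_flush() is hypothetical, locking and error handling
are omitted, and iommu_flush_context_global()/iommu_flush_iotlb_global()
stand in for whatever global context-cache/IOTLB flush routines the tree
provides.

/* Hypothetical sketch of the SRTP flush requirement (not the actual patch):
 * program the root table, set SRTP, wait for RTPS, then globally flush the
 * context-cache and the IOTLB so no stale entries survive. */
static void set_root_entry_and_flush(struct iommu *iommu, u64 root_maddr)
{
    u32 sts, gcmd;

    /* Program the root-table address and latch it with SRTP. */
    dmar_writeq(iommu->reg, DMAR_RTADDR_REG, root_maddr);
    gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd | DMA_GCMD_SRTP);
    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_RTPS), sts);

    /* After SRTP: global context-cache flush, then global IOTLB flush
     * (helper names are placeholders for the tree's flush routines). */
    iommu_flush_context_global(iommu, 0);
    iommu_flush_iotlb_global(iommu, 0);
}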
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 11:30:45 2009 +0100 (2009-06-16)
parents 4fb8a6c993e2
children cc07094a02e4
line source
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/time.h>
#include <xen/list.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

#ifdef __ia64__
#define dest_SMI -1
#define nr_ioapics iosapic_get_nr_iosapics()
#define nr_ioapic_registers(i) iosapic_get_nr_pins(i)
#else
#define nr_ioapic_registers(i) nr_ioapic_registers[i]
#endif

/* apic_pin_2_ir_idx[apicid][pin] = interrupt remapping table index */
static unsigned int **apic_pin_2_ir_idx;

static int init_apic_pin_2_ir_idx(void)
{
    unsigned int *_apic_pin_2_ir_idx;
    unsigned int nr_pins, i;

    nr_pins = 0;
    for ( i = 0; i < nr_ioapics; i++ )
        nr_pins += nr_ioapic_registers(i);

    _apic_pin_2_ir_idx = xmalloc_array(unsigned int, nr_pins);
    apic_pin_2_ir_idx = xmalloc_array(unsigned int *, nr_ioapics);
    if ( (_apic_pin_2_ir_idx == NULL) || (apic_pin_2_ir_idx == NULL) )
    {
        xfree(_apic_pin_2_ir_idx);
        xfree(apic_pin_2_ir_idx);
        return -ENOMEM;
    }

    for ( i = 0; i < nr_pins; i++ )
        _apic_pin_2_ir_idx[i] = -1;

    nr_pins = 0;
    for ( i = 0; i < nr_ioapics; i++ )
    {
        apic_pin_2_ir_idx[i] = &_apic_pin_2_ir_idx[nr_pins];
        nr_pins += nr_ioapic_registers(i);
    }

    return 0;
}

u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}

static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
        return -EFAULT;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

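/*
 * Convert an IO-APIC RTE write into an interrupt remapping table entry:
 * allocate (or reuse) the index recorded for this IO-APIC pin, fill the
 * entry from the RTE value being written (only the destination for
 * upper-half writes), and rewrite the RTE in remappable format pointing
 * at that index.  The IEC is then flushed so hardware picks up the
 * updated entry.
 */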
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    index = apic_pin_2_ir_idx[apic][ioapic_pin];
    if ( index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        apic_pin_2_ir_idx[apic][ioapic_pin] = index;
    }

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        new_ire.lo.dst = (value >> 24) << 8;
#else /* __ia64__ */
        new_ire.lo.dst = value >> 16;
#endif
    }
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(IO_APIC_ID(apic));

        new_ire.hi.sq = 0;    /* compare all 16 bits of the SID */
        new_ire.hi.svt = 1;   /* verify requester ID using SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read the lower and upper 32 bits of the RTE */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( (remap_rte->format == 0) || (old_rte.delivery_mode == dest_SMI) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}

void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    unsigned int ioapic_pin = (reg - 0x10) / 2;
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both the lower and upper 32 bits of the RTE */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( old_rte.delivery_mode == dest_SMI )
    {
        /* Some BIOSes do not zero out reserved fields in IOAPIC
         * RTEs.  clear_IO_APIC() zeroes out all RTEs except for RTEs
         * with the MSI delivery type.  This is a problem when the host
         * OS converts the SMI delivery type to some other type but leaves
         * the reserved fields uninitialized.  This can cause an interrupt
         * remapping table out-of-bounds error if the "format" field is 1
         * and the "index" field has a value larger than the maximum index
         * of the interrupt remapping table.
         */
        if ( remap_rte->format == 1 )
        {
            remap_rte->format = 0;
            *IO_APIC_BASE(apic) = reg;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
            *IO_APIC_BASE(apic) = reg + 1;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
        }

        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin,
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}

#if defined(__i386__) || defined(__x86_64__)
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctl == NULL");
        return -EFAULT;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

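/*
 * Convert an MSI/MSI-X message into an interrupt remapping table entry:
 * allocate (or reuse) the index recorded in the msi_desc, program the
 * entry from the requested message, rewrite the message in remappable
 * format pointing at that index, and flush the IEC.
 */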
static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
#elif defined(__ia64__)
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
#endif

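/*
 * Bring up interrupt remapping on one IOMMU: allocate the interrupt
 * remapping table if it does not exist yet, program IRTA and issue SIRTP,
 * globally flush the interrupt entry cache, then enable compatibility
 * format pass-through (CFI) and finally interrupt remapping (IRE),
 * waiting for each corresponding status bit.
 */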
int enable_intremap(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    u32 sts, gcmd;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENOMEM;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_SIRTPS), sts);

    /* After setting SIRTP, we must globally invalidate the interrupt entry cache */
    iommu_flush_iec_global(iommu);

    /* enable compatibility format interrupt pass-through */
    gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_CFIS), sts);

    /* enable interrupt remapping hardware */
    gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_IRES), sts);

    return init_apic_pin_2_ir_idx();
}

void disable_intremap(struct iommu *iommu)
{
    u32 sts;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  !(sts & DMA_GSTS_IRES), sts);
}