ia64/xen-unstable

xen/drivers/passthrough/vtd/intremap.c @ 18781:40668908260c

vtd: fix interrupt remapping to handle SMI RTE's with uninitialized
reserved fields

Some BIOSes do not zero out the reserved fields in IOAPIC RTEs.
clear_IO_APIC() zeroes out all RTEs except those with the SMI delivery
type. This is a problem when the host OS converts the SMI delivery type
to some other type but leaves the reserved field uninitialized. This
can cause an interrupt remapping table out-of-bounds error if the
"format" field is 1 and the uninitialized "index" field holds a value
larger than the maximum index of the interrupt remapping table.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Nov 10 10:41:41 2008 +0000 (2008-11-10)
parents dacc54242a63
children 3ba83def85a2
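
As a minimal illustration of the failure mode described in the commit message above (not part of the change itself), the sketch below assembles the 16-bit interrupt index from the remap-format bits of an RTE the same way the code in this file does. The field names index_0_14, index_15, format and the constant name IREMAP_ENTRY_NR mirror the ones used in the source; the standalone struct, the table size of 256, and the main() harness are illustrative assumptions, not the real Xen definitions.

/* Minimal sketch: a stale remap-format RTE yields an out-of-bounds index. */
#include <stdio.h>

struct rte_remap_bits {
    unsigned int index_0_14;  /* low 15 bits of the interrupt index */
    unsigned int index_15;    /* bit 15 of the interrupt index */
    unsigned int format;      /* 1 = remapped format */
};

#define IREMAP_ENTRY_NR 256   /* illustrative table size, not Xen's value */

int main(void)
{
    /* BIOS left the reserved bits of an SMI RTE uninitialized. */
    struct rte_remap_bits stale = { .index_0_14 = 0x7abc,
                                    .index_15 = 1,
                                    .format = 1 };
    int index = (stale.index_15 << 15) | stale.index_0_14;

    if ( stale.format == 1 && index > IREMAP_ENTRY_NR - 1 )
        printf("index %d exceeds table of %d entries\n",
               index, IREMAP_ENTRY_NR);
    return 0;
}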

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}

static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
        return -EFAULT;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic_id, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( remap_rte->format == 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
    }
    else
        index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        new_ire.lo.dst = (value >> 24) << 8;
#else /* __ia64__ */
        new_ire.lo.dst = value >> 16;
#endif
    }
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(apic_id);

        new_ire.hi.sq = 0;    /* comparing all 16-bit of SID */
        new_ire.hi.svt = 1;   /* requestor ID verification SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( (remap_rte->format == 0) || (old_rte.delivery_mode == dest_SMI) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}
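
/*
 * Intercept IO-APIC RTE writes: the requested RTE is converted into an
 * interrupt remapping table entry, and the RTE actually written to the
 * IO-APIC uses the remapped format (an index into that table).  SMI RTEs
 * bypass remapping; any stale format bit left by the BIOS is cleared first.
 */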
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( old_rte.delivery_mode == dest_SMI )
    {
        /* Some BIOSes do not zero out the reserved fields in IOAPIC
         * RTEs.  clear_IO_APIC() zeroes out all RTEs except those with
         * the SMI delivery type.  This is a problem when the host OS
         * converts the SMI delivery type to some other type but leaves
         * the reserved field uninitialized.  This can cause an interrupt
         * remapping table out-of-bounds error if the "format" field is 1
         * and the "index" field holds a value larger than the maximum
         * index of the interrupt remapping table.
         */
        if ( remap_rte->format == 1 )
        {
            remap_rte->format = 0;
            *IO_APIC_BASE(apic) = reg;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
            *IO_APIC_BASE(apic) = reg + 1;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
        }

        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    if ( ioapic_rte_to_remap_entry(iommu, IO_APIC_ID(apic),
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}

#if defined(__i386__) || defined(__x86_64__)
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctl == NULL");
        return -EFAULT;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
             remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev->bus, pdev->devfn);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev->bus, pdev->devfn);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
#elif defined(__ia64__)
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
#endif
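
/*
 * Allocate (if not yet done) and program the interrupt remapping table for
 * this IOMMU, then enable remapping: write the table address to IRTA, set
 * SIRTP, enable compatibility-format pass-through (CFI) and interrupt
 * remapping (IRE), polling the status register for each step, and finish
 * with a global IEC flush.
 */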
int intremap_setup(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    s_time_t start_time;

    if ( !ecap_intr_remap(iommu->ecap) )
        return -ENODEV;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr();
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENOMEM;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    iommu->gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* Make sure hardware completes it */
    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set SIRTP field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable compatibility format interrupt pass through */
    iommu->gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set CFI field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable interrupt remapping hardware */
    iommu->gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set IRE field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* After setting SIRTP, we should globally invalidate the IEC */
    iommu_flush_iec_global(iommu);

    return 0;
}