ia64/xen-unstable

xen/drivers/passthrough/vtd/intremap.c @ 19810:aa472909b39c

vtd: IO NUMA support

This patch adds VT-d RHSA processing for IO NUMA support. The basic
idea is to parse the ACPI RHSA structure to obtain the mapping from
each VT-d hardware unit to its proximity domain. This mapping is then
used when allocating pages for VT-d hardware data structures.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 23 11:14:24 2009 +0100 (2009-06-23)
parents 703ced548925
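The RHSA parsing itself lands in dmar.c; this file benefits indirectly, since
alloc_pgtable_maddr() can then place VT-d data structures (such as the interrupt
remapping table allocated in enable_intremap() below) on the node local to the
VT-d unit. As a rough, illustrative sketch of the mapping the commit message
describes — all names below (struct acpi_rhsa_unit, rhsa_list, drhd_to_pxm) are
hypothetical stand-ins, not the actual Xen implementation:

    /* One entry per ACPI DMAR RHSA sub-table: register base -> proximity domain. */
    struct acpi_rhsa_unit {
        struct list_head list;
        u64 address;            /* DRHD register base address */
        u32 proximity_domain;   /* ACPI proximity domain, as in the SRAT */
    };
    static LIST_HEAD(rhsa_list);

    /* Return the proximity domain of a VT-d unit, or -1 if no RHSA matches it. */
    static int drhd_to_pxm(const struct acpi_drhd_unit *drhd)
    {
        struct acpi_rhsa_unit *rhsa;

        list_for_each_entry ( rhsa, &rhsa_list, list )
            if ( rhsa->address == drhd->address )
                return rhsa->proximity_domain;

        return -1;
    }

The allocation path can then convert the proximity domain to a NUMA node (e.g.
via pxm_to_node()) and pass it to the heap allocator when VT-d tables are built.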
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/time.h>
#include <xen/list.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

#ifdef __ia64__
#define dest_SMI -1
#define nr_ioapics              iosapic_get_nr_iosapics()
#define nr_ioapic_registers(i)  iosapic_get_nr_pins(i)
#else
#define nr_ioapic_registers(i)  nr_ioapic_registers[i]
#endif

/*
 * source validation type (SVT)
 */
#define SVT_NO_VERIFY       0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ   0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS      0x2  /* verify bus of request-id */

/*
 * source-id qualifier (SQ)
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/* apic_pin_2_ir_idx[apicid][pin] = interrupt remapping table index */
static unsigned int **apic_pin_2_ir_idx;

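/*
 * Allocate and initialise the per-IOAPIC pin-to-IRT-index lookup table.
 * Every entry starts out as -1 ("no remap entry assigned yet"); an index
 * is allocated lazily the first time a pin's RTE is programmed.
 */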
static int init_apic_pin_2_ir_idx(void)
{
    unsigned int *_apic_pin_2_ir_idx;
    unsigned int nr_pins, i;

    nr_pins = 0;
    for ( i = 0; i < nr_ioapics; i++ )
        nr_pins += nr_ioapic_registers(i);

    _apic_pin_2_ir_idx = xmalloc_array(unsigned int, nr_pins);
    apic_pin_2_ir_idx = xmalloc_array(unsigned int *, nr_ioapics);
    if ( (_apic_pin_2_ir_idx == NULL) || (apic_pin_2_ir_idx == NULL) )
    {
        xfree(_apic_pin_2_ir_idx);
        xfree(apic_pin_2_ir_idx);
        return -ENOMEM;
    }

    for ( i = 0; i < nr_pins; i++ )
        _apic_pin_2_ir_idx[i] = -1;

    nr_pins = 0;
    for ( i = 0; i < nr_ioapics; i++ )
    {
        apic_pin_2_ir_idx[i] = &_apic_pin_2_ir_idx[nr_pins];
        nr_pins += nr_ioapic_registers(i);
    }

    return 0;
}

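/*
 * Look up the PCI requester-id (BDF) that the ACPI DMAR device scope
 * reports for the given IO-APIC; this is the source-id the IOMMU sees
 * for interrupts originating from that IO-APIC.
 */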
u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}

static void set_ire_sid(struct iremap_entry *ire,
                        unsigned int svt, unsigned int sq, unsigned int sid)
{
    ire->hi.svt = svt;
    ire->hi.sq = sq;
    ire->hi.sid = sid;
}

static void set_ioapic_source_id(int apic_id, struct iremap_entry *ire)
{
    set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                apicid_to_bdf(apic_id));
}

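/*
 * Translate an interrupt remapping table entry back into the compatibility
 * IO-APIC RTE layout, so that callers reading the RTE see the vector,
 * delivery mode and destination the remap entry actually programs.
 */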
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctrl is not ready\n");
        return -EFAULT;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

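/*
 * Build (or update) the interrupt remapping table entry for an IO-APIC pin
 * and rewrite the caller's RTE into remappable format, i.e. with format = 1
 * and the IRT index encoded in place of the destination fields.  An IRT
 * index is allocated for the pin on first use.
 */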
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    index = apic_pin_2_ir_idx[apic][ioapic_pin];
    if ( index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        apic_pin_2_ir_idx[apic][ioapic_pin] = index;
    }

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        new_ire.lo.dst = (value >> 24) << 8;
#else /* __ia64__ */
        new_ire.lo.dst = value >> 16;
#endif
    }
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;

        set_ioapic_source_id(IO_APIC_ID(apic), &new_ire);
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;    /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

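/*
 * Read one 32-bit half of an IO-APIC RTE on behalf of the generic IO-APIC
 * code.  If interrupt remapping is not active, or the stored RTE is not in
 * remappable format, fall back to a plain register read; otherwise return
 * the compatibility-format view reconstructed from the remap table.
 */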
unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( (remap_rte->format == 0) || (old_rte.delivery_mode == dest_SMI) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}

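/*
 * Write one 32-bit half of an IO-APIC RTE.  With remapping active the value
 * is folded into the interrupt remapping table entry for that pin (masking
 * the pin while the table is updated) and the RTE itself is rewritten in
 * remappable format.  SMI RTEs are passed through unchanged.
 */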
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    unsigned int ioapic_pin = (reg - 0x10) / 2;
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( old_rte.delivery_mode == dest_SMI )
    {
        /* Some BIOSes do not zero out reserved fields in IO-APIC RTEs.
         * clear_IO_APIC() zeroes out all RTEs except for those with SMI
         * delivery type.  This is a problem when the host OS converts an
         * SMI delivery type to some other type but leaves the reserved
         * field uninitialized.  If the "format" field is 1 and the "index"
         * field holds a value larger than the maximum index of the
         * interrupt remapping table, this causes an out-of-bounds access
         * to the table.
         */
        if ( remap_rte->format == 1 )
        {
            remap_rte->format = 0;
            *IO_APIC_BASE(apic) = reg;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
            *IO_APIC_BASE(apic) = reg + 1;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
        }

        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin,
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}

#if defined(__i386__) || defined(__x86_64__)

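/*
 * Fill in the source-id fields of an interrupt remapping entry for an MSI
 * source: PCIe endpoints are verified against their own requester-id, while
 * conventional PCI devices are verified via their upstream bridge, since the
 * bridge (or its secondary bus) is the requester-id the IOMMU actually
 * observes for them.
 */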
static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
{
    int type;
    u8 bus, devfn, secbus;
    int ret;

    if ( !pdev || !ire )
        return;

    bus = pdev->bus;
    devfn = pdev->devfn;
    type = pdev_type(bus, devfn);
    switch ( type )
    {
    case DEV_TYPE_PCIe_BRIDGE:
    case DEV_TYPE_PCIe2PCI_BRIDGE:
    case DEV_TYPE_LEGACY_PCI_BRIDGE:
        break;

    case DEV_TYPE_PCIe_ENDPOINT:
        set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, PCI_BDF2(bus, devfn));
        break;

    case DEV_TYPE_PCI:
        ret = find_upstream_bridge(&bus, &devfn, &secbus);
        if ( ret == 0 ) /* integrated PCI device */
        {
            set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                        PCI_BDF2(bus, devfn));
        }
        else if ( ret == 1 ) /* find upstream bridge */
        {
            if ( pdev_type(bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE )
                set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
                            (bus << 8) | pdev->bus);
            else if ( pdev_type(bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
                set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
                            PCI_BDF2(bus, devfn));
        }
        break;

    default:
        gdprintk(XENLOG_WARNING VTDPREFIX,
                 "set_msi_source_id: unknown type : bdf = %x:%x.%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        break;
    }
}

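/*
 * Reconstruct the compatibility-format MSI address/data (destination,
 * delivery mode, vector) from the interrupt remapping table entry that an
 * already-remapped MSI message points at.
 */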
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctrl == NULL\n");
        return -EFAULT;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

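/*
 * Convert a compatibility-format MSI message into an interrupt remapping
 * table entry and rewrite the message in remappable format (SHV = 1,
 * format = 1, with the IRT index encoded in the address).  The remap index
 * is allocated on first use and cached in the msi_desc.
 */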
static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    set_msi_source_id(pdev, &new_ire);
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}

void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
#elif defined(__ia64__)
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
#endif

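/*
 * Allocate the interrupt remapping table (if not already present) and turn
 * on interrupt remapping for this IOMMU: program IRTA, set SIRTP, flush the
 * interrupt entry cache, then enable compatibility-format passthrough (CFI)
 * and interrupt remapping (IRE).  The table page is allocated via
 * alloc_pgtable_maddr(drhd, 1) so that, with the RHSA-derived proximity
 * information this changeset introduces, it can come from memory local to
 * the VT-d unit's NUMA node.
 */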
int enable_intremap(struct iommu *iommu)
{
    struct acpi_drhd_unit *drhd;
    struct ir_ctrl *ir_ctrl;
    u32 sts, gcmd;
    unsigned long flags;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        drhd = iommu_to_drhd(iommu);
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, 1);
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENOMEM;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    spin_lock_irqsave(&iommu->register_lock, flags);

    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_SIRTPS), sts);
    spin_unlock_irqrestore(&iommu->register_lock, flags);

    /* After setting SIRTP, we must globally invalidate the interrupt entry cache */
    iommu_flush_iec_global(iommu);

    spin_lock_irqsave(&iommu->register_lock, flags);
    /* enable compatibility format interrupt pass through */
    gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_CFIS), sts);

    /* enable interrupt remapping hardware */
    gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_IRES), sts);
    spin_unlock_irqrestore(&iommu->register_lock, flags);

    return init_apic_pin_2_ir_idx();
}

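/*
 * Clear the IRE bit in the global command register and wait for the
 * hardware to report interrupt remapping disabled.
 */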
void disable_intremap(struct iommu *iommu)
{
    u32 sts;
    unsigned long flags;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    spin_lock_irqsave(&iommu->register_lock, flags);
    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  !(sts & DMA_GSTS_IRES), sts);
    spin_unlock_irqrestore(&iommu->register_lock, flags);
}