xen/drivers/passthrough/vtd/intremap.c @ 18658:10a2069a1edb (ia64/xen-unstable)

Define a macro IO_APIC_ID() for x86.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 20 15:13:50 2008 +0100
parents 609d0d34450f
children 2a25fd94c6f2
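For reference, IO_APIC_ID() is used below but defined elsewhere. A minimal sketch of the x86 definition this changeset introduces, assuming the conventional mp_ioapics[] table from the x86 io_apic headers (an assumption; the actual definition is not shown in this view):

    /* assumed definition; the real one lives in the x86 io_apic header */
    #define IO_APIC_ID(idx) (mp_ioapics[idx].mpc_apicid)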
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"
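
/*
 * Translate an IO-APIC's APIC id to the source-id (bus/dev/fn, stored in
 * ioapic.info) recorded for it on the owning DRHD unit's ioapic_list.
 * Returns 0 if no match is found.
 */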
u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}
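
/*
 * Reconstruct a plain xAPIC RTE from a remap-format RTE by fetching the
 * vector, delivery mode and destination from the interrupt remapping
 * table entry that the RTE indexes.
 */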
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctrl is not ready\n");
        return -EFAULT;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
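
/*
 * Convert an IO-APIC RTE write into an interrupt remapping table entry:
 * allocate (or reuse) a table slot, fill it from the RTE fields, and
 * rewrite the RTE in remap format so that it points at that slot.
 */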
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic_id, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( remap_rte->format == 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
    }
    else
        index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
        new_ire.lo.dst = (value >> 24) << 8;
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(apic_id);

        new_ire.hi.sq = 0;    /* compare all 16 bits of the SID */
        new_ire.hi.svt = 1;   /* requester-id verification using SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
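
/*
 * Intercepted IO-APIC register read.  Falls back to a direct register
 * read when remapping is not active or the RTE is in compatibility
 * format; otherwise returns the requested half of the RTE reconstructed
 * from the remapping table.
 */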
unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( remap_rte->format == 0 )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}
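
/*
 * Intercepted IO-APIC register write.  Masks the RTE while the remapping
 * table is updated, then writes the RTE back in remap format; falls back
 * to a direct register write when remapping is not active or the table
 * update fails.
 */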
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    if ( ioapic_rte_to_remap_entry(iommu, IO_APIC_ID(apic),
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}

#if defined(__i386__) || defined(__x86_64__)
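/*
 * Rebuild an MSI address/data pair from the interrupt remapping table
 * entry indexed by the remap-format MSI message.
 */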
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctrl == NULL\n");
        return -EFAULT;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
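
/*
 * Fill an interrupt remapping table entry from an MSI message, allocating
 * a table slot on first use (tracked in msi_desc->remap_index), and
 * rewrite the message in remap format so that it points at that slot.
 */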
static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
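
/*
 * MSI hooks: look up the IOMMU of the DRHD unit that covers the device,
 * then translate the MSI message through its remapping table.  These are
 * no-ops while interrupt remapping is not active.
 */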
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev->bus, pdev->devfn);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev->bus, pdev->devfn);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
#elif defined(__ia64__)
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}

void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
#endif
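
/*
 * Enable interrupt remapping on one IOMMU: allocate the remapping table
 * on first use, program DMAR_IRTA_REG, then set SIRTP, CFI and IRE in
 * turn, polling the status register with a timeout after each step.
 */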
int intremap_setup(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    s_time_t start_time;

    if ( !ecap_intr_remap(iommu->ecap) )
        return -ENODEV;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr();
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENODEV;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    iommu->gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* Make sure hardware completes it */
    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set SIRTP field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable compatibility format interrupt pass through */
    iommu->gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set CFI field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable interrupt remapping hardware */
    iommu->gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set IRE field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* After setting SIRTP, globally invalidate the IEC */
    iommu_flush_iec_global(iommu);

    return 0;
}