xen/drivers/passthrough/vtd/intremap.c
ia64/xen-unstable @ 17773:73a1daa9715f

vtd: build fix.
Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>

author    Keir Fraser <keir.fraser@citrix.com>
date      Mon Jun 02 10:03:18 2008 +0100 (2008-06-02)
parents   1e66fa5931ee
children  37ff3322d4f3

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/msi.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"
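
/*
 * Map an IO-APIC id to the source-id (bus/devfn) recorded in the owning
 * DRHD's IO-APIC scope list; returns 0 if no matching IO-APIC is found.
 */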
u16 apicid_to_bdf(int apic_id)
{
    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
    struct acpi_ioapic_unit *acpi_ioapic_unit;

    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
        if ( acpi_ioapic_unit->apic_id == apic_id )
            return acpi_ioapic_unit->ioapic.info;

    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
    return 0;
}
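
/*
 * Reconstruct the compatibility fields of an IO-APIC RTE (vector, delivery
 * mode, destination, ...) from the interrupt remapping table entry that the
 * remap-format RTE's index points at.
 */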
static void remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_APIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL || ir_ctrl->iremap_index < 0 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
        return;
    }

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
        panic("%s: index (%d) is larger than remap table entry size (%d)!\n",
              __func__, index, ir_ctrl->iremap_index);

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
}
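
/*
 * Fold an IO-APIC RTE write into the interrupt remapping table: allocate a
 * new index for a compatibility-format RTE (or reuse the index already held
 * by a remap-format one), fill in the iremap entry, and rewrite the RTE in
 * remap format so it references that index.
 */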
static void ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic_id, struct IO_APIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_APIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    if ( remap_rte->format == 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
    }
    else
        index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > IREMAP_ENTRY_NR - 1 )
        panic("ioapic_rte_to_remap_entry: intremap index is more than 256!\n");

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
        new_ire.lo.dst = (value >> 24) << 8;
    else
    {
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(apic_id);

        new_ire.hi.sq = 0;    /* compare all 16 bits of the SID */
        new_ire.hi.svt = 1;   /* requester-id verification using SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return;
}
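
/*
 * Read an IO-APIC RTE register on behalf of the generic IO-APIC code.
 * Falls back to a plain register read when interrupt remapping is not set
 * up for this IO-APIC, or when the entry is masked or still in
 * compatibility format; otherwise the value is recovered from the remap
 * table via remap_entry_to_ioapic_rte().
 */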
unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_APIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    if ( rte_upper )
        reg--;

    /* read lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( remap_rte->mask || (remap_rte->format == 0) )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    remap_entry_to_ioapic_rte(iommu, &old_rte);
    if ( rte_upper )
    {
        *IO_APIC_BASE(apic) = reg + 1;
        return (*(((u32 *)&old_rte) + 1));
    }
    else
    {
        *IO_APIC_BASE(apic) = reg;
        return (*(((u32 *)&old_rte) + 0));
    }
}
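
/*
 * Write an IO-APIC RTE register: temporarily mask the entry, update the
 * interrupt remapping table, then write the resulting remap-format RTE
 * (both halves) back to the IO-APIC.
 */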
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct IO_APIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    if ( rte_upper )
        reg--;

    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    ioapic_rte_to_remap_entry(iommu, mp_ioapics[apic].mpc_apicid,
                              &old_rte, rte_upper, value);

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}
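
/*
 * Rebuild an MSI message (address/data) from the interrupt remapping table
 * entry referenced by a remap-format MSI message.
 */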
static void remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctl == NULL");
        return;
    }

    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
        panic("%s: index (%d) is larger than remap table entry size (%d)\n",
              __func__, index, ir_ctrl->iremap_index);

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
}
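
/*
 * Translate an MSI message into an interrupt remapping table entry, reusing
 * the entry already assigned to this PCI device (matched by source-id) if
 * one exists, and rewrite the message in remap format so it carries only
 * the table index.
 */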
static void msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int i = 0;

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);

    /* If an entry already exists for this PCI device, reuse it;
     * otherwise assign a new entry.
     */
    for ( i = 0; i <= ir_ctrl->iremap_index; i++ )
    {
        iremap_entry = &iremap_entries[i];
        if ( iremap_entry->hi.sid ==
             ((pdev->bus << 8) | pdev->devfn) )
            break;
    }

    if ( i > ir_ctrl->iremap_index )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
    }
    else
        index = i;

    if ( index > IREMAP_ENTRY_NR - 1 )
        panic("msi_msg_to_remap_entry: intremap index is more than 256!\n");

    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return;
}
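
/* MSI read path: recover the original message from the remap table. */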
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(pdev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    remap_entry_to_msi_msg(iommu, msg);
}
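
/* MSI write path: push the message into the remap table first. */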
void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    struct acpi_drhd_unit *drhd = NULL;
    struct iommu *iommu = NULL;
    struct ir_ctrl *ir_ctrl;

    drhd = acpi_find_matched_drhd_unit(msi_desc->dev);
    iommu = drhd->iommu;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
        return;

    msi_msg_to_remap_entry(iommu, pdev, msg);
}
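
/*
 * Enable interrupt remapping on one IOMMU: allocate the remap table if
 * needed, program the IRTA register, then set SIRTP, CFI and IRE in the
 * global command register, polling the status register (with a timeout)
 * after each step, and finish with a global IEC flush.
 */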
int intremap_setup(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    s_time_t start_time;

    if ( !ecap_intr_remap(iommu->ecap) )
        return -ENODEV;

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr();
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENODEV;
        }
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
#endif
    /* size field = 256 entries per 4K page = 8 - 1 */
    ir_ctrl->iremap_maddr |= 7;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP */
    iommu->gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* Make sure hardware completes it */
    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set SIRTP field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable compatibility format interrupt pass-through */
    iommu->gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set CFI field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* enable interrupt remapping hardware */
    iommu->gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
        {
            dprintk(XENLOG_ERR VTDPREFIX,
                    "Cannot set IRE field for interrupt remapping\n");
            return -ENODEV;
        }
        cpu_relax();
    }

    /* After setting SIRTP, we should globally invalidate the IEC */
    iommu_flush_iec_global(iommu);

    return 0;
}