ia64/xen-unstable

annotate xen/drivers/passthrough/vtd/intremap.c @ 19734:4fb8a6c993e2

VT-d: correct way to submit command to GCMD register

Per the VT-d spec, software should submit only one "incremental" command
at a time to the Global Command register. The current implementation uses a
variable (gcmd) to record the state of the Global Status register, which
is error prone.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 09:29:42 2009 +0100 (2009-06-05)
parents a69daf23602a
children fa51db0871e1
rev   line source
keir@17099 1 /*
keir@17099 2 * Copyright (c) 2006, Intel Corporation.
keir@17099 3 *
keir@17099 4 * This program is free software; you can redistribute it and/or modify it
keir@17099 5 * under the terms and conditions of the GNU General Public License,
keir@17099 6 * version 2, as published by the Free Software Foundation.
keir@17099 7 *
keir@17099 8 * This program is distributed in the hope it will be useful, but WITHOUT
keir@17099 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
keir@17099 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
keir@17099 11 * more details.
keir@17099 12 *
keir@17099 13 * You should have received a copy of the GNU General Public License along with
keir@17099 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
keir@17099 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
keir@17099 16 *
keir@17099 17 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
keir@17099 18 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
keir@17099 19 */
keir@17099 20
keir@17099 21 #include <xen/irq.h>
keir@17099 22 #include <xen/sched.h>
keir@17212 23 #include <xen/iommu.h>
keir@18658 24 #include <asm/hvm/iommu.h>
keir@17434 25 #include <xen/time.h>
keir@19602 26 #include <xen/list.h>
keir@17443 27 #include <xen/pci.h>
keir@17540 28 #include <xen/pci_regs.h>
keir@17212 29 #include "iommu.h"
keir@17099 30 #include "dmar.h"
keir@17099 31 #include "vtd.h"
keir@17099 32 #include "extern.h"
keir@17099 33
keir@19707 34 #ifdef __ia64__
keir@18798 35 #define dest_SMI -1
keir@19727 36 #define nr_ioapics iosapic_get_nr_iosapics()
keir@19727 37 #define nr_ioapic_registers(i) iosapic_get_nr_pins(i)
keir@19727 38 #else
keir@19727 39 #define nr_ioapic_registers(i) nr_ioapic_registers[i]
keir@18798 40 #endif
keir@18798 41
keir@19707 42 /* apic_pin_2_ir_idx[apicid][pin] = interrupt remapping table index */
keir@19707 43 static unsigned int **apic_pin_2_ir_idx;
keir@19420 44
keir@19707 45 static int init_apic_pin_2_ir_idx(void)
keir@19602 46 {
keir@19707 47 unsigned int *_apic_pin_2_ir_idx;
keir@19707 48 unsigned int nr_pins, i;
keir@19602 49
keir@19707 50 nr_pins = 0;
keir@19707 51 for ( i = 0; i < nr_ioapics; i++ )
keir@19727 52 nr_pins += nr_ioapic_registers(i);
keir@19602 53
keir@19707 54 _apic_pin_2_ir_idx = xmalloc_array(unsigned int, nr_pins);
keir@19707 55 apic_pin_2_ir_idx = xmalloc_array(unsigned int *, nr_ioapics);
keir@19707 56 if ( (_apic_pin_2_ir_idx == NULL) || (apic_pin_2_ir_idx == NULL) )
keir@19602 57 {
keir@19707 58 xfree(_apic_pin_2_ir_idx);
keir@19707 59 xfree(apic_pin_2_ir_idx);
keir@19707 60 return -ENOMEM;
keir@19602 61 }
keir@19602 62
keir@19707 63 for ( i = 0; i < nr_pins; i++ )
keir@19707 64 _apic_pin_2_ir_idx[i] = -1;
keir@19602 65
keir@19707 66 nr_pins = 0;
keir@19707 67 for ( i = 0; i < nr_ioapics; i++ )
keir@19707 68 {
keir@19707 69 apic_pin_2_ir_idx[i] = &_apic_pin_2_ir_idx[nr_pins];
keir@19727 70 nr_pins += nr_ioapic_registers(i);
keir@19707 71 }
keir@19602 72
keir@19602 73 return 0;
keir@19602 74 }
keir@19420 75
keir@17099 76 u16 apicid_to_bdf(int apic_id)
keir@17099 77 {
keir@17099 78 struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
keir@17099 79 struct acpi_ioapic_unit *acpi_ioapic_unit;
keir@17099 80
keir@17099 81 list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
keir@17099 82 if ( acpi_ioapic_unit->apic_id == apic_id )
keir@17099 83 return acpi_ioapic_unit->ioapic.info;
keir@17099 84
keir@17099 85 dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
keir@17099 86 return 0;
keir@17099 87 }
keir@17099 88
/*
 * Translate the interrupt remapping table entry referenced by a
 * remap-format IO-APIC RTE back into a plain xAPIC RTE, so the caller
 * can return the format the OS originally wrote.
 *
 * Returns 0 on success, -EFAULT when interrupt remapping control is
 * not ready or the RTE's index exceeds the highest allocated entry.
 */
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct IO_APIC_route_remap_entry *remap_rte;
    int index = 0;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
        return -EFAULT;
    }

    /* The remap-format RTE carries a 16-bit table index split across
     * two fields: bit 15 plus bits 0-14. */
    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    index = (remap_rte->index_15 << 15) | remap_rte->index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)!\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    /* Map the remapping table page and select the indexed entry. */
    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    /* Reconstruct the original RTE fields from the IRT entry. */
    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
keir@17099 134
/*
 * Convert an IO-APIC RTE write into an interrupt remapping table
 * entry and rewrite the RTE into remap format, pointing at that IRT
 * index.  'rte_upper' selects which 32-bit half of the RTE 'value'
 * is destined for; only a lower-half write rebuilds the full entry
 * and the caller-visible RTE.
 *
 * Returns 0 on success, -EFAULT when the remapping table is full.
 */
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
    int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
    unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    /* Each (apic, pin) keeps a stable IRT index; allocate the next
     * free one on first use. */
    index = apic_pin_2_ir_idx[apic][ioapic_pin];
    if ( index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        apic_pin_2_ir_idx[apic][ioapic_pin] = index;
    }

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    /* Work on a local copy; the live entry is updated in one go below. */
    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        /* x86: destination APIC id lives in bits 24-31 of the upper
         * RTE word; the IRT 'dst' field keeps it shifted left by 8. */
        new_ire.lo.dst = (value >> 24) << 8;
#else /* __ia64__ */
        new_ire.lo.dst = value >> 16;
#endif
    }
    else
    {
        /* Lower-half write: decode the xAPIC fields from 'value' and
         * populate the remap entry. */
        *(((u32 *)&new_rte) + 0) = value;
        new_ire.lo.fpd = 0;
        new_ire.lo.dm = new_rte.dest_mode;
        new_ire.lo.rh = 0;
        new_ire.lo.tm = new_rte.trigger;
        new_ire.lo.dlm = new_rte.delivery_mode;
        new_ire.lo.avail = 0;
        new_ire.lo.res_1 = 0;
        new_ire.lo.vector = new_rte.vector;
        new_ire.lo.res_2 = 0;
        new_ire.hi.sid = apicid_to_bdf(IO_APIC_ID(apic));

        new_ire.hi.sq = 0;    /* comparing all 16-bit of SID */
        new_ire.hi.svt = 1;   /* requestor ID verification SID/SQ */
        new_ire.hi.res_1 = 0;
        new_ire.lo.p = 1;     /* finally, set present bit */

        /* now construct new ioapic rte entry */
        remap_rte->vector = new_rte.vector;
        remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */
        remap_rte->index_15 = (index >> 15) & 0x1;
        remap_rte->index_0_14 = index & 0x7fff;

        remap_rte->delivery_status = new_rte.delivery_status;
        remap_rte->polarity = new_rte.polarity;
        remap_rte->irr = new_rte.irr;
        remap_rte->trigger = new_rte.trigger;
        remap_rte->mask = new_rte.mask;
        remap_rte->reserved = 0;
        remap_rte->format = 1;    /* indicate remap format */
    }

    /* Publish the updated entry, then flush the cached copy and the
     * IEC so hardware observes it. */
    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
keir@17099 225
/*
 * Intercept an IO-APIC RTE register read.  If interrupt remapping is
 * active and the stored RTE is in remap format, translate it back to
 * the plain xAPIC format the reader expects; otherwise fall through
 * to the raw indirect-register read (write reg select, read data).
 */
unsigned int io_apic_read_remap_rte(
    unsigned int apic, unsigned int reg)
{
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    /* Remapping not set up (or no entry allocated yet): raw read. */
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
         ir_ctrl->iremap_index == -1 )
    {
        *IO_APIC_BASE(apic) = reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    /* Normalise to the even (lower-half) register index. */
    if ( rte_upper )
        reg--;

    /* read lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    /* Not remap format, or an SMI RTE we leave untouched: raw read. */
    if ( (remap_rte->format == 0) || (old_rte.delivery_mode == dest_SMI) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    /* Translation failed: fall back to the raw register contents. */
    if ( remap_entry_to_ioapic_rte(iommu, &old_rte) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        return *(IO_APIC_BASE(apic)+4);
    }

    /* Return the requested half of the reconstructed RTE. */
    if ( rte_upper )
        return (*(((u32 *)&old_rte) + 1));
    else
        return (*(((u32 *)&old_rte) + 0));
}
keir@17099 270
/*
 * Intercept an IO-APIC RTE register write.  If interrupt remapping is
 * active, route the write through the interrupt remapping table and
 * store a remap-format RTE; SMI entries and setup failures fall back
 * to a raw write of the caller's value.
 */
void io_apic_write_remap_rte(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    /* RTEs start at register 0x10, two 32-bit registers per pin. */
    unsigned int ioapic_pin = (reg - 0x10) / 2;
    struct IO_xAPIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_remap_entry *remap_rte;
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int saved_mask;

    /* Remapping not set up: raw indirect-register write. */
    if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
    {
        *IO_APIC_BASE(apic) = reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* Normalise to the even (lower-half) register index. */
    if ( rte_upper )
        reg--;

    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;

    if ( old_rte.delivery_mode == dest_SMI )
    {
        /* Some BIOS does not zero out reserve fields in IOAPIC
         * RTE's. clear_IO_APIC() zeroes out all RTE's except for RTE
         * with MSI delivery type. This is a problem when the host
         * OS converts SMI delivery type to some other type but leaving
         * the reserved field uninitialized. This can cause interrupt
         * remapping table out of bound error if "format" field is 1
         * and the "index" field has a value that is larger than
         * the maximum index of interrupt remapping table.
         */
        if ( remap_rte->format == 1 )
        {
            /* Clear the stray format bit in the stored RTE first. */
            remap_rte->format = 0;
            *IO_APIC_BASE(apic) = reg;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
            *IO_APIC_BASE(apic) = reg + 1;
            *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
        }

        /* Pass the SMI RTE write through unremapped. */
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* mask the interrupt while we change the intremap table */
    saved_mask = remap_rte->mask;
    remap_rte->mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
    remap_rte->mask = saved_mask;

    /* Conversion failed (e.g. table full): raw write of the value. */
    if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin,
                                   &old_rte, rte_upper, value) )
    {
        *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
        *(IO_APIC_BASE(apic)+4) = value;
        return;
    }

    /* write new entry to ioapic */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
    *IO_APIC_BASE(apic) = reg + 1;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
}
keir@17099 346
keir@18625 347 #if defined(__i386__) || defined(__x86_64__)
/*
 * Translate the interrupt remapping table entry referenced by a
 * remap-format MSI message back into a plain MSI address/data pair.
 *
 * Returns 0 on success, -EFAULT when interrupt remapping control is
 * not ready or the message's index exceeds the highest allocated
 * entry.
 */
static int remap_entry_to_msi_msg(
    struct iommu *iommu, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct msi_msg_remap_entry *remap_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_msi_msg: ir_ctl == NULL");
        return -EFAULT;
    }

    /* The 16-bit table index is split across two address fields. */
    remap_rte = (struct msi_msg_remap_entry *) msg;
    index = (remap_rte->address_lo.index_15 << 15) |
            remap_rte->address_lo.index_0_14;

    if ( index > ir_ctrl->iremap_index )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) is larger than remap table entry size (%d)\n",
                __func__, index, ir_ctrl->iremap_index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];

    /* Rebuild the MSI address from the IRT entry's destination mode,
     * delivery mode and destination id. */
    msg->address_hi = MSI_ADDR_BASE_HI;
    msg->address_lo =
        MSI_ADDR_BASE_LO |
        ((iremap_entry->lo.dm == 0) ?
            MSI_ADDR_DESTMODE_PHYS:
            MSI_ADDR_DESTMODE_LOGIC) |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_ADDR_REDIRECTION_CPU:
            MSI_ADDR_REDIRECTION_LOWPRI) |
        iremap_entry->lo.dst >> 8;

    /* Rebuild the MSI data (trigger, delivery mode, vector). */
    msg->data =
        MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        ((iremap_entry->lo.dlm != dest_LowestPrio) ?
            MSI_DATA_DELIVERY_FIXED:
            MSI_DATA_DELIVERY_LOWPRI) |
        iremap_entry->lo.vector;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
keir@17743 405
/*
 * Convert an MSI address/data pair into an interrupt remapping table
 * entry (keyed by the msi_desc's cached remap_index) and rewrite the
 * message into remap format, pointing at that index.
 *
 * Returns 0 on success, -EFAULT when the remapping table is full (in
 * which case the msi_desc's index is reset to "unallocated").
 */
static int msi_msg_to_remap_entry(
    struct iommu *iommu, struct pci_dev *pdev,
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct msi_msg_remap_entry *remap_rte;
    unsigned int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct msi_msg_remap_entry *) msg;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    /* Allocate the next free IRT index on first use of this desc. */
    if ( msi_desc->remap_index < 0 )
    {
        ir_ctrl->iremap_index++;
        index = ir_ctrl->iremap_index;
        msi_desc->remap_index = index;
    }
    else
        index = msi_desc->remap_index;

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%ld)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        msi_desc->remap_index = -1;
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    /* Work on a local copy; the live entry is updated in one go below. */
    iremap_entries =
        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
    iremap_entry = &iremap_entries[index];
    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    /* Set interrupt remapping table entry */
    new_ire.lo.fpd = 0;
    new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    new_ire.lo.rh = 0;
    new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    new_ire.lo.avail = 0;
    new_ire.lo.res_1 = 0;
    new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
                        MSI_DATA_VECTOR_MASK;
    new_ire.lo.res_2 = 0;
    new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
                      & 0xff) << 8;

    /* Source-id is the requesting device's bus/devfn. */
    new_ire.hi.sid = (pdev->bus << 8) | pdev->devfn;
    new_ire.hi.sq = 0;
    new_ire.hi.svt = 1;
    new_ire.hi.res_1 = 0;
    new_ire.lo.p = 1;    /* finally, set present bit */

    /* now construct new MSI/MSI-X rte entry */
    remap_rte->address_lo.dontcare = 0;
    remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
    remap_rte->address_lo.index_0_14 = index & 0x7fff;
    remap_rte->address_lo.SHV = 1;
    remap_rte->address_lo.format = 1;

    remap_rte->address_hi = 0;
    remap_rte->data = 0;

    /* Publish the entry, then flush the cached copy and the IEC so
     * hardware observes it. */
    memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry);
    iommu_flush_iec_index(iommu, 0, index);
    invalidate_sync(iommu);

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
keir@17743 484
keir@17743 485 void msi_msg_read_remap_rte(
keir@17743 486 struct msi_desc *msi_desc, struct msi_msg *msg)
keir@17743 487 {
keir@17743 488 struct pci_dev *pdev = msi_desc->dev;
keir@17743 489 struct acpi_drhd_unit *drhd = NULL;
keir@17743 490 struct iommu *iommu = NULL;
keir@17743 491 struct ir_ctrl *ir_ctrl;
keir@17743 492
keir@19402 493 drhd = acpi_find_matched_drhd_unit(pdev);
keir@17743 494 iommu = drhd->iommu;
keir@17743 495
keir@17743 496 ir_ctrl = iommu_ir_ctrl(iommu);
keir@17743 497 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
keir@17743 498 return;
keir@17743 499
keir@17743 500 remap_entry_to_msi_msg(iommu, msg);
keir@17743 501 }
keir@17743 502
keir@17743 503 void msi_msg_write_remap_rte(
keir@17743 504 struct msi_desc *msi_desc, struct msi_msg *msg)
keir@17743 505 {
keir@17743 506 struct pci_dev *pdev = msi_desc->dev;
keir@17743 507 struct acpi_drhd_unit *drhd = NULL;
keir@17743 508 struct iommu *iommu = NULL;
keir@17743 509 struct ir_ctrl *ir_ctrl;
keir@17743 510
keir@19402 511 drhd = acpi_find_matched_drhd_unit(pdev);
keir@17743 512 iommu = drhd->iommu;
keir@17743 513
keir@17743 514 ir_ctrl = iommu_ir_ctrl(iommu);
keir@17743 515 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
keir@17743 516 return;
keir@17743 517
keir@18638 518 msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
keir@17743 519 }
keir@18625 520 #elif defined(__ia64__)
/* ia64 stub: MSI remapping read not implemented. */
void msi_msg_read_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
keir@18625 526
/* ia64 stub: MSI remapping write not implemented. */
void msi_msg_write_remap_rte(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    /* TODO. */
}
keir@18625 532 #endif
keir@17743 533
/*
 * Enable interrupt remapping on an IOMMU: allocate the remapping
 * table (first call only), program IRTA, then submit SIRTP, CFI and
 * IRE to the Global Command register one at a time, waiting for each
 * to be reflected in the Global Status register, per the VT-d spec's
 * one-incremental-command-at-a-time rule.
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
int enable_intremap(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl;
    u32 sts, gcmd;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    ir_ctrl = iommu_ir_ctrl(iommu);
    if ( ir_ctrl->iremap_maddr == 0 )
    {
        /* First enable: allocate one page for the remapping table. */
        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
        if ( ir_ctrl->iremap_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
            return -ENOMEM;
        }
        /* No entries allocated yet. */
        ir_ctrl->iremap_index = -1;
    }

#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
    /* set extended interrupt mode bit */
    ir_ctrl->iremap_maddr |=
            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
    /* set size of the interrupt remapping table */
    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);

    /* set SIRTP: start from the current status, add one command. */
    gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    gcmd |= DMA_GCMD_SIRTP;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    /* Wait until hardware acknowledges the table pointer. */
    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_SIRTPS), sts);

    /* enable compatibility format interrupt pass through */
    gcmd |= DMA_GCMD_CFI;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_CFIS), sts);

    /* enable interrupt remapping hardware */
    gcmd |= DMA_GCMD_IRE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  (sts & DMA_GSTS_IRES), sts);

    /* After set SIRTP, we should do globally invalidate the IEC */
    iommu_flush_iec_global(iommu);

    return init_apic_pin_2_ir_idx();
}
keir@19420 590
/*
 * Disable interrupt remapping: clear the IRE bit in the Global
 * Command register (preserving the other status bits) and wait for
 * the Global Status register to report remapping stopped.
 */
void disable_intremap(struct iommu *iommu)
{
    u32 sts;

    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);

    /* Resubmit current status minus IRE as the new command. */
    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                  !(sts & DMA_GSTS_IRES), sts);
}