ia64/xen-unstable

view xen/drivers/passthrough/amd/iommu_intr.c @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral iommu interface. The
reason is that, as of changeset 19732, there is only one global flag
"iommu_enabled" that controls iommu enablement for both vtd and amd
systems, so we need different code paths for vtd and amd iommu systems
when this flag is turned on. Also, the early check of
"iommu_enabled" in iommu_setup() is removed to prevent iommu
functionality from being disabled on amd systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 1695a86b3d7c
children
line source
1 /*
2 * Copyright (C) 2007 Advanced Micro Devices, Inc.
3 * Author: Wei Wang <wei.wang2@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
20 #include <xen/sched.h>
21 #include <xen/hvm/iommu.h>
22 #include <asm/amd-iommu.h>
23 #include <asm/hvm/svm/amd-iommu-proto.h>
/* Allocation order of the interrupt remapping table: 2^1 = 2 pages. */
#define INTREMAP_TABLE_ORDER 1
/* Serializes all lookups/updates of int_remap_table entries. */
static DEFINE_SPINLOCK(int_remap_table_lock);
/*
 * Single global interrupt remapping table, lazily allocated in
 * amd_iommu_setup_intremap_table() and freed in
 * deallocate_intremap_table().  NULL until first setup.
 */
void *int_remap_table = NULL;
29 static u8 *get_intremap_entry(u8 vector, u8 dm)
30 {
31 u8 *table;
32 int offset = 0;
33 table = (u8*)int_remap_table;
35 BUG_ON( !table );
36 offset = (dm << INT_REMAP_INDEX_DM_SHIFT) & INT_REMAP_INDEX_DM_MASK;
37 offset |= (vector << INT_REMAP_INDEX_VECTOR_SHIFT ) &
38 INT_REMAP_INDEX_VECTOR_MASK;
40 return (u8*) (table + offset);
41 }
/*
 * Rebuild one 32-bit interrupt remapping entry in place.
 *
 * The first set_field_in_reg_u32() call uses 0 (not *entry) as its
 * input, so the entry is reconstructed from scratch; every subsequent
 * call layers another field onto the value accumulated in *entry.
 * Fields written: RemapEn=1, SupIOPF=0, IntType, ReqEOI=0, DM, Dest,
 * Vector.
 *
 * NOTE(review): INT_REMAP_ENTRY_DEST_MAST is presumably a typo for
 * ..._DEST_MASK in the amd-iommu header — preserved as-is here since
 * the macro is defined elsewhere; confirm against the header.
 */
static void update_intremap_entry(u32* entry, u8 vector, u8 int_type,
                                  u8 dest_mode, u8 dest)
{
    /* RemapEn = 1: start from 0 so stale bits cannot survive. */
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         INT_REMAP_ENTRY_REMAPEN_MASK,
                         INT_REMAP_ENTRY_REMAPEN_SHIFT, entry);
    /* SupIOPF = 0: do not suppress I/O page fault logging. */
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
                         INT_REMAP_ENTRY_SUPIOPF_MASK,
                         INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry);
    set_field_in_reg_u32(int_type, *entry,
                         INT_REMAP_ENTRY_INTTYPE_MASK,
                         INT_REMAP_ENTRY_INTTYPE_SHIFT, entry);
    /* ReqEOI = 0. */
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
                         INT_REMAP_ENTRY_REQEOI_MASK,
                         INT_REMAP_ENTRY_REQEOI_SHIFT, entry);
    set_field_in_reg_u32((u32)dest_mode, *entry,
                         INT_REMAP_ENTRY_DM_MASK,
                         INT_REMAP_ENTRY_DM_SHIFT, entry);
    set_field_in_reg_u32((u32)dest, *entry,
                         INT_REMAP_ENTRY_DEST_MAST,
                         INT_REMAP_ENTRY_DEST_SHIFT, entry);
    set_field_in_reg_u32((u32)vector, *entry,
                         INT_REMAP_ENTRY_VECTOR_MASK,
                         INT_REMAP_ENTRY_VECTOR_SHIFT, entry);
}
69 void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
70 {
71 u32 cmd[4], entry;
73 cmd[3] = cmd[2] = 0;
74 set_field_in_reg_u32(device_id, 0,
75 IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
76 IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
77 cmd[0] = entry;
78 set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
79 IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
80 &entry);
81 cmd[1] = entry;
82 send_iommu_command(iommu, cmd);
83 }
/*
 * Refresh the remapping entry backing an IO-APIC RTE.
 *
 * Called from amd_iommu_ioapic_update_ire() after a write to an
 * IO-APIC redirection register.  Only upper-half writes (rte_upper)
 * cause an update: the destination is extracted from the freshly
 * written @value, while vector, delivery mode and destination mode come
 * from the *ioapic_rte snapshot assembled by the caller.  Lower-half
 * writes fall straight through to the unlock.
 */
static void update_intremap_entry_from_ioapic(
    struct IO_APIC_route_entry *ioapic_rte,
    unsigned int rte_upper, unsigned int value)
{
    unsigned long flags;
    u32* entry;
    u8 delivery_mode, dest, vector, dest_mode;
    struct IO_APIC_route_entry *rte = ioapic_rte;

    spin_lock_irqsave(&int_remap_table_lock, flags);

    if ( rte_upper )
    {
        /* Destination APIC ID sits in bits 24-31 of the upper RTE word. */
        dest = (value >> 24) & 0xFF;
        delivery_mode = rte->delivery_mode;
        vector = rte->vector;
        dest_mode = rte->dest_mode;
        /* (vector, delivery_mode) index the remap table slot to rewrite. */
        entry = (u32*)get_intremap_entry((u8)rte->vector,
                                         (u8)rte->delivery_mode);
        update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
    }

    spin_unlock_irqrestore(&int_remap_table_lock, flags);
    return;
}
/*
 * Allocate the global interrupt remapping table (if not already done)
 * and seed it with an entry for every currently-unmasked IO-APIC RTE.
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
int __init amd_iommu_setup_intremap_table(void)
{
    struct IO_APIC_route_entry rte = {0};
    unsigned long flags;
    u32* entry;
    int apic, pin;
    u8 delivery_mode, dest, vector, dest_mode;

    if ( int_remap_table == NULL )
    {
        int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
        if ( int_remap_table == NULL )
            return -ENOMEM;
        /* Zero the whole table: RemapEn=0 everywhere until seeded below. */
        memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
    }

    /* Read ioapic entries and update interrupt remapping table accordingly */
    for ( apic = 0; apic < nr_ioapics; apic++ )
    {
        for ( pin = 0; pin < nr_ioapic_registers[apic]; pin++ )
        {
            /* Upper 32 bits at register 0x11+2*pin, lower at 0x10+2*pin. */
            *(((int *)&rte) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
            *(((int *)&rte) + 0) = io_apic_read(apic, 0x10 + 2 * pin);

            /* Masked RTEs carry no live interrupt — nothing to remap. */
            if ( rte.mask == 1 )
                continue;

            delivery_mode = rte.delivery_mode;
            vector = rte.vector;
            dest_mode = rte.dest_mode;
            if ( dest_mode == 0 )
                dest = rte.dest.physical.physical_dest & 0xf;
            else
                dest = rte.dest.logical.logical_dest & 0xff;

            spin_lock_irqsave(&int_remap_table_lock, flags);
            entry = (u32*)get_intremap_entry(vector, delivery_mode);
            update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
            spin_unlock_irqrestore(&int_remap_table_lock, flags);
        }
    }
    return 0;
}
/*
 * Intercept a write to an IO-APIC redirection register and keep the
 * interrupt remapping table in sync.
 *
 * The raw write is performed first; if interrupt remapping is active
 * and the write hit the upper half of an RTE (where the destination
 * lives), the full RTE is re-read, the interrupt is masked while the
 * remap entry is rewritten, and the original mask state is restored.
 * The exact MMIO ordering below (index write, then data window at
 * IO_APIC_BASE+4) is mandated by the IO-APIC's indexed register
 * interface — do not reorder.
 */
void amd_iommu_ioapic_update_ire(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct IO_APIC_route_entry ioapic_rte = { 0 };
    unsigned int rte_upper = (reg & 1) ? 1 : 0;
    int saved_mask;

    /* Pass the write through to the IO-APIC unconditionally. */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = value;

    if ( int_remap_table == NULL )
        return;
    /* Only upper-half writes change remap-relevant state (destination). */
    if ( !rte_upper )
        return;

    /* Step back to the lower register of this RTE pair. */
    reg--;
    /* read both lower and upper 32-bits of rte entry */
    *IO_APIC_BASE(apic) = reg;
    *(((u32 *)&ioapic_rte) + 0) = *(IO_APIC_BASE(apic)+4);
    *IO_APIC_BASE(apic) = reg + 1;
    *(((u32 *)&ioapic_rte) + 1) = *(IO_APIC_BASE(apic)+4);

    /* mask the interrupt while we change the intremap table */
    saved_mask = ioapic_rte.mask;
    ioapic_rte.mask = 1;
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((int *)&ioapic_rte)+0);
    /* Restore the mask bit in the in-memory copy only; it is written
     * back to hardware after the table update below. */
    ioapic_rte.mask = saved_mask;

    update_intremap_entry_from_ioapic(&ioapic_rte, rte_upper, value);

    /* unmask the interrupt after we have updated the intremap table */
    *IO_APIC_BASE(apic) = reg;
    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&ioapic_rte)+0);
}
191 static void update_intremap_entry_from_msi_msg(
192 struct amd_iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
193 {
194 unsigned long flags;
195 u32* entry;
196 u16 dev_id;
198 u8 delivery_mode, dest, vector, dest_mode;
200 dev_id = (pdev->bus << 8) | pdev->devfn;
202 spin_lock_irqsave(&int_remap_table_lock, flags);
203 dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
204 delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
205 vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
206 dest = (msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff;
208 entry = (u32*)get_intremap_entry((u8)vector, (u8)delivery_mode);
209 update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
210 spin_unlock_irqrestore(&int_remap_table_lock, flags);
212 spin_lock_irqsave(&iommu->lock, flags);
213 invalidate_interrupt_table(iommu, dev_id);
214 flush_command_buffer(iommu);
215 spin_unlock_irqrestore(&iommu->lock, flags);
217 return;
218 }
220 void amd_iommu_msi_msg_update_ire(
221 struct msi_desc *msi_desc, struct msi_msg *msg)
222 {
223 struct pci_dev *pdev = msi_desc->dev;
224 struct amd_iommu *iommu = NULL;
226 iommu = find_iommu_for_device(pdev->bus, pdev->devfn);
228 if ( !iommu || !int_remap_table )
229 return;
231 update_intremap_entry_from_msi_msg(iommu, pdev, msg);
232 }
234 unsigned int amd_iommu_read_ioapic_from_ire(
235 unsigned int apic, unsigned int reg)
236 {
237 *IO_APIC_BASE(apic) = reg;
238 return *(IO_APIC_BASE(apic)+4);
239 }
/*
 * Vendor hook: intentionally a no-op on AMD — the MSI message is left
 * untouched on read (nothing to translate back from the remap table).
 */
void amd_iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
}
246 int __init deallocate_intremap_table(void)
247 {
248 if ( int_remap_table )
249 {
250 __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
251 int_remap_table = NULL;
252 }
254 return 0;
255 }