ia64/xen-unstable

view tools/ioemu/hw/pt-msi.c @ 18443:1a785d213573

ioemu: fix offset of MSI-X memory-mapped table.

Current code does not set dev->msix->table_off variable.
The offset of MSI-X memory mapped table is treated as 0.
The wrong region is unmapped from guest physical memory space.
As a result, guest device driver can't access memory mapped resource.

Signed-off-by: Yuji Shimada <shimada-yxb@necst.nec.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Sep 05 11:18:20 2008 +0100 (2008-09-05)
parents 0638a5c2cc9f
children
line source
1 /*
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Jiang Yunhong <yunhong.jiang@intel.com>
18 *
19 * This file implements direct PCI assignment to a HVM guest
20 */
#include "pt-msi.h"

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
/* MSI virtualization functions */
/*
 * Set up the physical MSI, but do not enable it yet.
 */
30 int pt_msi_setup(struct pt_dev *dev)
31 {
32 int pirq = -1;
34 if ( !(dev->msi->flags & MSI_FLAG_UNINIT) )
35 {
36 PT_LOG("setup physical after initialized?? \n");
37 return -1;
38 }
40 if ( xc_physdev_map_pirq_msi(xc_handle, domid, AUTO_ASSIGN, &pirq,
41 dev->pci_dev->dev << 3 | dev->pci_dev->func,
42 dev->pci_dev->bus, 0, 0) )
43 {
44 PT_LOG("error map msi\n");
45 return -1;
46 }
48 if ( pirq < 0 )
49 {
50 PT_LOG("invalid pirq number\n");
51 return -1;
52 }
54 dev->msi->pirq = pirq;
55 PT_LOG("msi mapped with pirq %x\n", pirq);
57 return 0;
58 }
60 uint32_t __get_msi_gflags(uint32_t data, uint64_t addr)
61 {
62 uint32_t result = 0;
63 int rh, dm, dest_id, deliv_mode, trig_mode;
65 rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
66 dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
67 dest_id = (addr >> MSI_TARGET_CPU_SHIFT) & 0xff;
68 deliv_mode = (data >> MSI_DATA_DELIVERY_SHIFT) & 0x7;
69 trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
71 result |= dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM) | \
72 (deliv_mode << GLFAGS_SHIFT_DELIV_MODE) |
73 (trig_mode << GLFAGS_SHIFT_TRG_MODE);
75 return result;
76 }
/*
 * Update the MSI mapping; usually called when the guest enables MSI,
 * except for the very first time.
 */
82 int pt_msi_update(struct pt_dev *d)
83 {
84 uint8_t gvec = 0;
85 uint32_t gflags = 0;
86 uint64_t addr = 0;
88 /* get vector, address, flags info, etc. */
89 gvec = d->msi->data & 0xFF;
90 addr = (uint64_t)d->msi->addr_hi << 32 | d->msi->addr_lo;
91 gflags = __get_msi_gflags(d->msi->data, addr);
93 PT_LOG("now update msi with pirq %x gvec %x\n", d->msi->pirq, gvec);
94 return xc_domain_update_msi_irq(xc_handle, domid, gvec,
95 d->msi->pirq, gflags);
96 }
/* MSI-X virtualization functions */
99 static void mask_physical_msix_entry(struct pt_dev *dev, int entry_nr, int mask)
100 {
101 void *phys_off;
103 phys_off = dev->msix->phys_iomem_base + 16 * entry_nr + 12;
104 *(uint32_t *)phys_off = mask;
105 }
/*
 * Push one guest MSI-X table entry down to the hypervisor, mapping a
 * pirq for it on first use.  Returns 0 on success or when the entry is
 * clean, or the failing hypercall's return value.
 */
static int pt_msix_update_one(struct pt_dev *dev, int entry_nr)
{
    struct msix_entry_info *entry = &dev->msix->msix_entry[entry_nr];
    int pirq = entry->pirq;
    /* io_mem[2] is the Message Data dword; low byte is the guest vector. */
    int gvec = entry->io_mem[2] & 0xff;
    /* io_mem[0..1] together hold the 64-bit Message Address. */
    uint64_t gaddr = *(uint64_t *)&entry->io_mem[0];
    uint32_t gflags = __get_msi_gflags(entry->io_mem[2], gaddr);
    int ret;

    /* Nothing to do unless the guest has dirtied this entry
     * (flags is set by pci_msix_writel on a changed dword). */
    if ( !entry->flags )
        return 0;

    /* Check if this entry is already mapped */
    if ( entry->pirq == -1 )
    {
        ret = xc_physdev_map_pirq_msi(xc_handle, domid, AUTO_ASSIGN, &pirq,
                                dev->pci_dev->dev << 3 | dev->pci_dev->func,
                                dev->pci_dev->bus, entry_nr,
                                dev->msix->table_base);
        if ( ret )
        {
            PT_LOG("error map msix entry %x\n", entry_nr);
            return ret;
        }
        entry->pirq = pirq;
    }

    PT_LOG("now update msix entry %x with pirq %x gvec %x\n",
            entry_nr, pirq, gvec);

    ret = xc_domain_update_msi_irq(xc_handle, domid, gvec, pirq, gflags);
    if ( ret )
    {
        PT_LOG("error update msix irq info for entry %d\n", entry_nr);
        return ret;
    }

    /* Entry is now in sync with the hypervisor; clear the dirty mark. */
    entry->flags = 0;

    return 0;
}
149 int pt_msix_update(struct pt_dev *dev)
150 {
151 struct pt_msix_info *msix = dev->msix;
152 int i;
154 for ( i = 0; i < msix->total_entries; i++ )
155 {
156 pt_msix_update_one(dev, i);
157 }
159 return 0;
160 }
162 static void pci_msix_invalid_write(void *opaque, target_phys_addr_t addr,
163 uint32_t val)
164 {
165 PT_LOG("invalid write to MSI-X table, \
166 only dword access is allowed.\n");
167 }
169 static void pci_msix_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
170 {
171 struct pt_dev *dev = (struct pt_dev *)opaque;
172 struct pt_msix_info *msix = dev->msix;
173 struct msix_entry_info *entry;
174 int entry_nr, offset;
176 if ( addr % 4 )
177 {
178 PT_LOG("unaligned dword access to MSI-X table, addr %016lx\n",
179 addr);
180 return;
181 }
183 entry_nr = (addr - msix->mmio_base_addr) / 16;
184 entry = &msix->msix_entry[entry_nr];
185 offset = ((addr - msix->mmio_base_addr) % 16) / 4;
187 if ( offset != 3 && msix->enabled && !(entry->io_mem[3] & 0x1) )
188 {
189 PT_LOG("can not update msix entry %d since MSI-X is already \
190 function now.\n", entry_nr);
191 return;
192 }
194 if ( offset != 3 && entry->io_mem[offset] != val )
195 entry->flags = 1;
196 entry->io_mem[offset] = val;
198 if ( offset == 3 )
199 {
200 if ( msix->enabled && !(val & 0x1) )
201 pt_msix_update_one(dev, entry_nr);
202 mask_physical_msix_entry(dev, entry_nr, entry->io_mem[3] & 0x1);
203 }
204 }
/* Write handlers indexed by access size (byte, word, dword):
 * only dword writes are valid on the MSI-X table. */
static CPUWriteMemoryFunc *pci_msix_write[] = {
    pci_msix_invalid_write,
    pci_msix_invalid_write,
    pci_msix_writel
};
212 static uint32_t pci_msix_invalid_read(void *opaque, target_phys_addr_t addr)
213 {
214 PT_LOG("invalid read to MSI-X table, \
215 only dword access is allowed.\n");
216 return 0;
217 }
219 static uint32_t pci_msix_readl(void *opaque, target_phys_addr_t addr)
220 {
221 struct pt_dev *dev = (struct pt_dev *)opaque;
222 struct pt_msix_info *msix = dev->msix;
223 int entry_nr, offset;
225 if ( addr % 4 )
226 {
227 PT_LOG("unaligned dword access to MSI-X table, addr %016lx\n",
228 addr);
229 return 0;
230 }
232 entry_nr = (addr - msix->mmio_base_addr) / 16;
233 offset = ((addr - msix->mmio_base_addr) % 16) / 4;
235 return msix->msix_entry[entry_nr].io_mem[offset];
236 }
/* Read handlers indexed by access size (byte, word, dword):
 * only dword reads are valid on the MSI-X table. */
static CPUReadMemoryFunc *pci_msix_read[] = {
    pci_msix_invalid_read,
    pci_msix_invalid_read,
    pci_msix_readl
};
/*
 * Map the guest-physical pages holding the MSI-X table directly onto
 * the machine frames behind them (table_off into the BAR's maddr).
 * No-op (returns 0) if this BAR does not contain the MSI-X table;
 * otherwise returns the hypercall's result.
 */
int add_msix_mapping(struct pt_dev *dev, int bar_index)
{
    if ( !(dev->msix && dev->msix->bar_index == bar_index) )
        return 0;

    /* Page count rounds the table size (16 bytes per entry) up to
     * whole pages. */
    return xc_domain_memory_mapping(xc_handle, domid,
                dev->msix->mmio_base_addr >> XC_PAGE_SHIFT,
                (dev->bases[bar_index].access.maddr
                 + dev->msix->table_off) >> XC_PAGE_SHIFT,
                (dev->msix->total_entries * 16
                 + XC_PAGE_SIZE -1) >> XC_PAGE_SHIFT,
                DPCI_REMOVE_MAPPING == DPCI_REMOVE_MAPPING ? DPCI_ADD_MAPPING : DPCI_ADD_MAPPING);
}
/*
 * Tear down the direct guest-to-machine mapping of the MSI-X table
 * pages and register the emulated MMIO handlers (pci_msix_read/write)
 * over that range instead, so table accesses trap into qemu.
 * No-op (returns 0) if this BAR does not contain the MSI-X table.
 */
int remove_msix_mapping(struct pt_dev *dev, int bar_index)
{
    if ( !(dev->msix && dev->msix->bar_index == bar_index) )
        return 0;

    /* Guest-physical address of the table: BAR base plus table offset. */
    dev->msix->mmio_base_addr = dev->bases[bar_index].e_physbase
                                + dev->msix->table_off;

    cpu_register_physical_memory(dev->msix->mmio_base_addr,
                                 dev->msix->total_entries * 16,
                                 dev->msix->mmio_index);

    /* Page count rounds the table size (16 bytes per entry) up to
     * whole pages. */
    return xc_domain_memory_mapping(xc_handle, domid,
                dev->msix->mmio_base_addr >> XC_PAGE_SHIFT,
                (dev->bases[bar_index].access.maddr
                 + dev->msix->table_off) >> XC_PAGE_SHIFT,
                (dev->msix->total_entries * 16
                 + XC_PAGE_SIZE -1) >> XC_PAGE_SHIFT,
                DPCI_REMOVE_MAPPING);
}
279 int pt_msix_init(struct pt_dev *dev, int pos)
280 {
281 uint8_t id;
282 uint16_t control;
283 int i, total_entries, table_off, bar_index;
284 struct pci_dev *pd = dev->pci_dev;
286 id = pci_read_byte(pd, pos + PCI_CAP_LIST_ID);
288 if ( id != PCI_CAP_ID_MSIX )
289 {
290 PT_LOG("error id %x pos %x\n", id, pos);
291 return -1;
292 }
294 control = pci_read_word(pd, pos + 2);
295 total_entries = control & 0x7ff;
296 total_entries += 1;
298 dev->msix = malloc(sizeof(struct pt_msix_info)
299 + total_entries*sizeof(struct msix_entry_info));
300 if ( !dev->msix )
301 {
302 PT_LOG("error allocation pt_msix_info\n");
303 return -1;
304 }
305 memset(dev->msix, 0, sizeof(struct pt_msix_info)
306 + total_entries*sizeof(struct msix_entry_info));
307 dev->msix->total_entries = total_entries;
308 for ( i = 0; i < total_entries; i++ )
309 dev->msix->msix_entry[i].pirq = -1;
311 dev->msix->mmio_index =
312 cpu_register_io_memory(0, pci_msix_read, pci_msix_write, dev);
314 table_off = pci_read_long(pd, pos + PCI_MSIX_TABLE);
315 bar_index = dev->msix->bar_index = table_off & PCI_MSIX_BIR;
316 table_off = dev->msix->table_off = table_off & ~PCI_MSIX_BIR;
317 dev->msix->table_base = dev->pci_dev->base_addr[bar_index];
318 PT_LOG("get MSI-X table bar base %llx\n",
319 (unsigned long long)dev->msix->table_base);
321 dev->msix->fd = open("/dev/mem", O_RDWR);
322 dev->msix->phys_iomem_base = mmap(0, total_entries * 16,
323 PROT_WRITE | PROT_READ, MAP_SHARED | MAP_LOCKED,
324 dev->msix->fd, dev->msix->table_base + table_off);
325 PT_LOG("mapping physical MSI-X table to %lx\n",
326 (unsigned long)dev->msix->phys_iomem_base);
327 return 0;
328 }
330 void pt_msix_delete(struct pt_dev *dev)
331 {
332 /* unmap the MSI-X memory mapped register area */
333 if (dev->msix->phys_iomem_base)
334 {
335 PT_LOG("unmapping physical MSI-X table from %lx\n",
336 (unsigned long)dev->msix->phys_iomem_base);
337 munmap(dev->msix->phys_iomem_base, dev->msix->total_entries * 16);
338 }
340 free(dev->msix);
341 }