ia64/xen-unstable

view xen/drivers/passthrough/vtd/x86/ats.c @ 19673:f3bed18decfc

[VTD] laying the ground work for ATS

These changes lay the groundwork for ATS enabling in Xen. They will be
followed by a patch that enables PCI MMCFG, which is needed for the
actual enabling of ATS functionality.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 29 09:19:30 2009 +0100 (2009-05-29)
parents
children 42fe00c6f8b4
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Allen Kay <allen.m.kay@intel.com>
18 */
20 #include <xen/sched.h>
21 #include <xen/iommu.h>
22 #include <xen/time.h>
23 #include <xen/pci.h>
24 #include <xen/pci_regs.h>
25 #include <asm/msi.h>
26 #include "../iommu.h"
27 #include "../dmar.h"
28 #include "../vtd.h"
29 #include "../extern.h"
/* DRHD units that have at least one ATS-capable device behind them. */
LIST_HEAD(ats_dev_drhd_units);

/* Byte offsets of registers within the PCIe ATS extended capability. */
#define ATS_REG_CAP    4
#define ATS_REG_CTL    6
/* Invalidate Queue Depth field in the ATS capability register. */
#define ATS_QUEUE_DEPTH_MASK     0xF
/* Enable bit in the ATS control register. */
#define ATS_ENABLE           (1<<15)
/* Per-device bookkeeping for a PCI device on which ATS has been enabled. */
struct pci_ats_dev {
    struct list_head list;     /* linkage on the ats_devices list */
    u8 bus;                    /* PCI bus number */
    u8 devfn;                  /* PCI device/function number */
    u16 ats_queue_depth;       /* ATS device invalidation queue depth */
    spinlock_t lock;
};
/* All devices on which ATS has been enabled (see enable_ats_device()). */
static LIST_HEAD(ats_devices);
/* Boot command line option "ats=<bool>" turns ATS support on or off. */
static void parse_ats_param(char *s);
custom_param("ats", parse_ats_param);
/* ATS is on by default; overridable via the "ats=" boot parameter. */
int ats_enabled = 1;

/*
 * Parse the "ats=" boot parameter: a comma-separated list of boolean
 * tokens, of which the last recognized one wins.  Unrecognized tokens
 * are silently ignored.  The input string is modified in place (each
 * comma is replaced by a NUL terminator).
 *
 * Fix: the original advanced s = ss + 1 unconditionally; when ss is
 * NULL (last token) that is NULL pointer arithmetic, which is
 * undefined behavior even though the value was never used.
 */
static void parse_ats_param(char *s)
{
    char *ss;

    do {
        ss = strchr(s, ',');
        if ( ss )
            *ss = '\0';

        if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
             !strcmp(s, "0") || !strcmp(s, "disable") )
            ats_enabled = 0;

        if ( !strcmp(s, "on") || !strcmp(s, "yes") || !strcmp(s, "true") ||
             !strcmp(s, "1") || !strcmp(s, "enable") )
            ats_enabled = 1;

        /* Only step past the comma when there is one. */
        if ( ss )
            s = ss + 1;
    } while ( ss );
}
73 struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
74 {
75 struct acpi_drhd_unit *drhd;
76 list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
77 {
78 if ( drhd->iommu == iommu )
79 return drhd;
80 }
81 return NULL;
82 }
/*
 * BUGBUG: return 0 until pcimmcfg is checked in.
 *
 * Stub for the PCI extended capability lookup: always reports
 * "capability not found", which keeps ATS effectively disabled until
 * PCI MMCFG support (required to reach extended config space) lands.
 */
int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
{
    return 0;
}
92 int ats_device(int seg, int bus, int devfn)
93 {
94 struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
95 struct pci_dev *pdev;
96 int pos = 0;
98 if ( !ats_enabled )
99 return 0;
101 if ( !qinval_enabled )
102 return 0;
104 pdev = pci_get_pdev(bus, devfn);
105 drhd = acpi_find_matched_drhd_unit(pdev);
106 if ( !ecap_dev_iotlb(drhd->iommu->ecap) )
107 return 0;
109 if ( !acpi_find_matched_atsr_unit(bus, devfn) )
110 return 0;
112 ats_drhd = find_ats_dev_drhd(drhd->iommu);
113 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
115 if ( pos && (ats_drhd == NULL) )
116 {
117 new_drhd = xmalloc(struct acpi_drhd_unit);
118 memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
119 list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
120 }
121 return pos;
122 }
124 int enable_ats_device(int seg, int bus, int devfn)
125 {
126 struct pci_ats_dev *pdev;
127 u32 value;
128 u16 queue_depth;
129 int pos;
131 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
133 if ( !pos )
134 {
135 dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x:%x\n",
136 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
137 return 0;
138 }
139 else
140 dprintk(XENLOG_ERR VTDPREFIX, "ats capability found %x:%x:%x\n",
141 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
143 /* BUGBUG: add back seg when multi-seg platform support is enabled */
144 value = pci_conf_read16(bus, PCI_SLOT(devfn),
145 PCI_FUNC(devfn), pos + ATS_REG_CAP);
146 queue_depth = value & ATS_QUEUE_DEPTH_MASK;
148 /* BUGBUG: add back seg when multi-seg platform support is enabled */
149 value = pci_conf_read16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + ATS_REG_CTL);
150 value |= ATS_ENABLE;
152 /* BUGBUG: add back seg when multi-seg platform support is enabled */
153 pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + ATS_REG_CTL, value);
155 if ( acpi_find_matched_atsr_unit(bus, devfn) )
156 {
157 pdev = xmalloc(struct pci_ats_dev);
158 pdev->bus = bus;
159 pdev->devfn = devfn;
160 pdev->ats_queue_depth = queue_depth;
161 list_add(&(pdev->list), &ats_devices);
162 }
163 return pos;
164 }
/*
 * Check whether the context entry for (pdev->bus, pdev->devfn) on this
 * IOMMU is (a) present, (b) assigned to domain id 'did', and (c) set up
 * for Device-IOTLB (ATS) translation.  Returns 1 when all three hold,
 * 0 otherwise.
 */
static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16 did)
{
    struct root_entry *root_entry = NULL;
    struct context_entry *ctxt_entry = NULL;
    int tt, found = 0;

    /* Map the root table and check the root entry for this bus. */
    root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
    if ( !root_entry || !root_present(root_entry[pdev->bus]) )
        goto out;

    /* NOTE(review): the raw root-entry value (address plus low flag bits)
     * is handed to map_vtd_domain_page() as the context-table address --
     * presumably the callee masks the flag bits; confirm. */
    ctxt_entry = (struct context_entry *)
        map_vtd_domain_page(root_entry[pdev->bus].val);

    if ( ctxt_entry == NULL )
        goto out;

    if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
        goto out;

    /* The entry must be configured for Device-IOTLB translation. */
    tt = context_translation_type(ctxt_entry[pdev->devfn]);
    if ( tt != CONTEXT_TT_DEV_IOTLB )
        goto out;

    found = 1;
out:
    /* Unmap whatever was mapped; both pointers start out NULL. */
    if ( root_entry )
        unmap_vtd_domain_page(root_entry);

    if ( ctxt_entry )
        unmap_vtd_domain_page(ctxt_entry);

    if ( found )
        return 1;

    return 0;
}
203 int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
204 u64 addr, unsigned int size_order, u64 type)
205 {
206 struct pci_ats_dev *pdev;
207 int sbit, ret = 0;
208 u16 sid;
210 if ( !ecap_dev_iotlb(iommu->ecap) )
211 return ret;
213 list_for_each_entry( pdev, &ats_devices, list )
214 {
215 sid = (pdev->bus << 8) | pdev->devfn;
217 switch ( type ) {
218 case DMA_TLB_DSI_FLUSH:
219 if ( !device_in_domain(iommu, pdev, did) )
220 break;
221 /* fall through if DSI condition met */
222 case DMA_TLB_GLOBAL_FLUSH:
223 /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
224 sbit = 1;
225 addr = (~0 << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
226 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
227 sid, sbit, addr);
228 break;
229 case DMA_TLB_PSI_FLUSH:
230 if ( !device_in_domain(iommu, pdev, did) )
231 break;
233 addr &= ~0 << (PAGE_SHIFT + size_order);
235 /* if size <= 4K, set sbit = 0, else set sbit = 1 */
236 sbit = size_order ? 1 : 0;
238 /* clear lower bits */
239 addr &= (~0 << (PAGE_SHIFT + size_order));
241 /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
242 if ( sbit )
243 addr &= (~0 & ~(1 << (PAGE_SHIFT + size_order)));
245 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
246 sid, sbit, addr);
247 break;
248 default:
249 dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
250 break;
251 }
252 }
253 return ret;
254 }