ia64/xen-unstable

view xen/drivers/passthrough/vtd/x86/ats.c @ 19697:42fe00c6f8b4

Enable pci mmcfg and ATS for x86_64

This patch enables PCI MMCONFIG in xen and turns on hooks for ATS.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 02 11:49:34 2009 +0100 (2009-06-02)
parents f3bed18decfc
children 133c889c21a7
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Allen Kay <allen.m.kay@intel.com>
18 */
20 #include <xen/sched.h>
21 #include <xen/iommu.h>
22 #include <xen/time.h>
23 #include <xen/pci.h>
24 #include <xen/pci_regs.h>
25 #include <asm/msi.h>
26 #include "../iommu.h"
27 #include "../dmar.h"
28 #include "../vtd.h"
29 #include "../extern.h"
/* DRHD units which have at least one ATS-capable device behind them
 * (populated lazily by ats_device()). */
LIST_HEAD(ats_dev_drhd_units);

/* Byte offsets of registers within the PCI Express ATS extended capability. */
#define ATS_REG_CAP 4
#define ATS_REG_CTL 6
/* Invalidate-queue-depth field in the ATS capability register. */
#define ATS_QUEUE_DEPTH_MASK 0xF
/* "ATS Enable" bit in the ATS control register. */
#define ATS_ENABLE (1<<15)

/* Book-keeping for one device on which ATS has been enabled. */
struct pci_ats_dev {
    struct list_head list;
    u8 bus;
    u8 devfn;
    u16 ats_queue_depth; /* ATS device invalidation queue depth */
    spinlock_t lock;
};
/* Devices successfully registered by enable_ats_device(). */
static LIST_HEAD(ats_devices);

static void parse_ats_param(char *s);
/* "ats=" hypervisor command-line option, handled by parse_ats_param(). */
custom_param("ats", parse_ats_param);

/* Global ATS on/off switch; enabled by default. */
int ats_enabled = 1;
52 static void parse_ats_param(char *s)
53 {
54 char *ss;
56 do {
57 ss = strchr(s, ',');
58 if ( ss )
59 *ss = '\0';
61 if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
62 !strcmp(s, "0") || !strcmp(s, "disable") )
63 ats_enabled = 0;
65 if ( !strcmp(s, "on") || !strcmp(s, "yes") || !strcmp(s, "true") ||
66 !strcmp(s, "1") || !strcmp(s, "enable") )
67 ats_enabled = 1;
69 s = ss + 1;
70 } while ( ss );
71 }
73 struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
74 {
75 struct acpi_drhd_unit *drhd;
76 list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
77 {
78 if ( drhd->iommu == iommu )
79 return drhd;
80 }
81 return NULL;
82 }
84 int ats_device(int seg, int bus, int devfn)
85 {
86 struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
87 struct pci_dev *pdev;
88 int pos = 0;
90 if ( !ats_enabled )
91 return 0;
93 if ( !qinval_enabled )
94 return 0;
96 pdev = pci_get_pdev(bus, devfn);
97 drhd = acpi_find_matched_drhd_unit(pdev);
98 if ( !ecap_dev_iotlb(drhd->iommu->ecap) )
99 return 0;
101 if ( !acpi_find_matched_atsr_unit(bus, devfn) )
102 return 0;
104 ats_drhd = find_ats_dev_drhd(drhd->iommu);
105 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
107 if ( pos && (ats_drhd == NULL) )
108 {
109 new_drhd = xmalloc(struct acpi_drhd_unit);
110 memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
111 list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
112 }
113 return pos;
114 }
116 int enable_ats_device(int seg, int bus, int devfn)
117 {
118 struct pci_ats_dev *pdev;
119 u32 value;
120 u16 queue_depth;
121 int pos;
123 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
125 if ( !pos )
126 {
127 dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x:%x\n",
128 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
129 return 0;
130 }
131 else
132 dprintk(XENLOG_ERR VTDPREFIX, "ats capability found %x:%x:%x\n",
133 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
135 /* BUGBUG: add back seg when multi-seg platform support is enabled */
136 value = pci_conf_read16(bus, PCI_SLOT(devfn),
137 PCI_FUNC(devfn), pos + ATS_REG_CAP);
138 queue_depth = value & ATS_QUEUE_DEPTH_MASK;
140 /* BUGBUG: add back seg when multi-seg platform support is enabled */
141 value = pci_conf_read16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + ATS_REG_CTL);
142 value |= ATS_ENABLE;
144 /* BUGBUG: add back seg when multi-seg platform support is enabled */
145 pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + ATS_REG_CTL, value);
147 if ( acpi_find_matched_atsr_unit(bus, devfn) )
148 {
149 pdev = xmalloc(struct pci_ats_dev);
150 pdev->bus = bus;
151 pdev->devfn = devfn;
152 pdev->ats_queue_depth = queue_depth;
153 list_add(&(pdev->list), &ats_devices);
154 }
155 return pos;
156 }
158 static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16 did)
159 {
160 struct root_entry *root_entry = NULL;
161 struct context_entry *ctxt_entry = NULL;
162 int tt, found = 0;
164 root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
165 if ( !root_entry || !root_present(root_entry[pdev->bus]) )
166 goto out;
168 ctxt_entry = (struct context_entry *)
169 map_vtd_domain_page(root_entry[pdev->bus].val);
171 if ( ctxt_entry == NULL )
172 goto out;
174 if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
175 goto out;
177 tt = context_translation_type(ctxt_entry[pdev->devfn]);
178 if ( tt != CONTEXT_TT_DEV_IOTLB )
179 goto out;
181 found = 1;
182 out:
183 if ( root_entry )
184 unmap_vtd_domain_page(root_entry);
186 if ( ctxt_entry )
187 unmap_vtd_domain_page(ctxt_entry);
189 if ( found )
190 return 1;
192 return 0;
193 }
195 int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
196 u64 addr, unsigned int size_order, u64 type)
197 {
198 struct pci_ats_dev *pdev;
199 int sbit, ret = 0;
200 u16 sid;
202 if ( !ecap_dev_iotlb(iommu->ecap) )
203 return ret;
205 list_for_each_entry( pdev, &ats_devices, list )
206 {
207 sid = (pdev->bus << 8) | pdev->devfn;
209 switch ( type ) {
210 case DMA_TLB_DSI_FLUSH:
211 if ( !device_in_domain(iommu, pdev, did) )
212 break;
213 /* fall through if DSI condition met */
214 case DMA_TLB_GLOBAL_FLUSH:
215 /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
216 sbit = 1;
217 addr = (~0 << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
218 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
219 sid, sbit, addr);
220 break;
221 case DMA_TLB_PSI_FLUSH:
222 if ( !device_in_domain(iommu, pdev, did) )
223 break;
225 addr &= ~0 << (PAGE_SHIFT + size_order);
227 /* if size <= 4K, set sbit = 0, else set sbit = 1 */
228 sbit = size_order ? 1 : 0;
230 /* clear lower bits */
231 addr &= (~0 << (PAGE_SHIFT + size_order));
233 /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
234 if ( sbit )
235 addr &= (~0 & ~(1 << (PAGE_SHIFT + size_order)));
237 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
238 sid, sbit, addr);
239 break;
240 default:
241 dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
242 break;
243 }
244 }
245 return ret;
246 }