ia64/xen-unstable

view xen/drivers/passthrough/vtd/iommu.h @ 19673:f3bed18decfc

[VTD] laying the ground work for ATS

These changes lay the ground work for ATS enabling in Xen. It will be
followed by patch which enables PCI MMCFG which is needed for actual
enabling of ATS functionality.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 29 09:19:30 2009 +0100 (2009-05-29)
parents c3a307f5a14b
children
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 */
20 #ifndef _INTEL_IOMMU_H_
21 #define _INTEL_IOMMU_H_
23 #include <xen/types.h>
/*
 * Register offsets into the VT-d remapping hardware MMIO block,
 * per the Intel IOMMU (VT-d) version 1.0 public specification.
 */
#define DMAR_VER_REG      0x0  /* architecture version supported */
#define DMAR_CAP_REG      0x8  /* hardware capabilities */
#define DMAR_ECAP_REG     0x10 /* extended capabilities */
#define DMAR_GCMD_REG     0x18 /* global command */
#define DMAR_GSTS_REG     0x1c /* global status */
#define DMAR_RTADDR_REG   0x20 /* root-entry table address */
#define DMAR_CCMD_REG     0x28 /* context command */
#define DMAR_FSTS_REG     0x34 /* fault status */
#define DMAR_FECTL_REG    0x38 /* fault event control */
#define DMAR_FEDATA_REG   0x3c /* fault event interrupt data */
#define DMAR_FEADDR_REG   0x40 /* fault event interrupt address */
#define DMAR_FEUADDR_REG  0x44 /* fault event upper address */
#define DMAR_AFLOG_REG    0x58 /* advanced fault log */
#define DMAR_PMEN_REG     0x64 /* protected memory enable */
#define DMAR_PLMBASE_REG  0x68 /* protected low-memory base (PMRR) */
#define DMAR_PLMLIMIT_REG 0x6c /* protected low-memory limit */
#define DMAR_PHMBASE_REG  0x70 /* protected high-memory base */
#define DMAR_PHMLIMIT_REG 0x78 /* protected high-memory limit */
#define DMAR_IQH_REG      0x80 /* invalidation queue head */
#define DMAR_IQT_REG      0x88 /* invalidation queue tail */
#define DMAR_IQA_REG      0x90 /* invalidation queue address */
#define DMAR_IRTA_REG     0xB8 /* interrupt remapping table address */
#define OFFSET_STRIDE (9)

/*
 * 32-bit MMIO accessors for the DMAR register block.
 * Arguments are parenthesized so callers may pass expressions
 * (e.g. dmar_readl(base, off + 4)) without misgrouping.
 */
#define dmar_readl(dmar, reg) readl((dmar) + (reg))
#define dmar_writel(dmar, reg, val) writel(val, (dmar) + (reg))

/*
 * 64-bit accessors synthesized from two 32-bit operations (GNU
 * statement-expression for readq).  Not atomic with respect to the
 * hardware: the two halves are read/written separately.
 */
#define dmar_readq(dmar, reg) ({ \
    u32 lo, hi; \
    lo = dmar_readl(dmar, reg); \
    hi = dmar_readl(dmar, (reg) + 4); \
    (((u64) hi) << 32) + lo; })
#define dmar_writeq(dmar, reg, val) do {\
    dmar_writel(dmar, reg, (u32)(val)); \
    dmar_writel(dmar, (reg) + 4, (u32)((u64)(val) >> 32)); \
} while (0)
/* Version register (DMAR_VER_REG) decoding. */
#define VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define VER_MINOR(v) ((v) & 0x0f)

/*
 * Capability register (DMAR_CAP_REG) field accessors.
 */
#define cap_read_drain(c)       (((c) >> 55) & 1)
#define cap_write_drain(c)      (((c) >> 54) & 1)
#define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)   ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)        (((c) >> 39) & 1)

#define cap_super_page_val(c)   (((c) >> 34) & 0xf)
/*
 * NOTE(review): &cap_super_page_val(c) takes the address of an rvalue
 * expression; this will not compile if expanded — verify it is unused,
 * or that find_first_bit here is a macro accepting a value.
 */
#define cap_super_offset(c)     (((find_first_bit(&cap_super_page_val(c), 4)) \
                                  * OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)

#define cap_isoch(c)            (((c) >> 23) & 1)
#define cap_qos(c)              (((c) >> 22) & 1)
#define cap_mgaw(c)             ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)            (((c) >> 8) & 0x1f)
#define cap_caching_mode(c)     (((c) >> 7) & 1)
#define cap_phmr(c)             (((c) >> 6) & 1)
#define cap_plmr(c)             (((c) >> 5) & 1)
#define cap_rwbf(c)             (((c) >> 4) & 1)
#define cap_afl(c)              (((c) >> 3) & 1)
#define cap_ndoms(c)            (1 << (4 + 2 * ((c) & 0x7)))
/*
 * Extended Capability register (DMAR_ECAP_REG) field accessors.
 * The argument is fully parenthesized; the originals expanded 'e'
 * bare, so e.g. ecap_queued_inval(a | b) grouped as a | (b >> 1).
 */
#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_coherent(e) (((e) >> 0) & 0x1)
#define ecap_queued_inval(e) (((e) >> 1) & 0x1)
#define ecap_dev_iotlb(e) (((e) >> 2) & 0x1)
#define ecap_intr_remap(e) (((e) >> 3) & 0x1)
#define ecap_ext_intr(e) (((e) >> 4) & 0x1)
#define ecap_cache_hints(e) (((e) >> 5) & 0x1)
#define ecap_pass_thru(e) (((e) >> 6) & 0x1)
#define ecap_snp_ctl(e) (((e) >> 7) & 0x1)
/* IOTLB invalidate register (IOTLB_REG) fields. */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(x) (((x) >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
/* Argument parenthesized: the original's bare 'x & 0xffff' misgrouped
 * for arguments containing '|'. */
#define DMA_TLB_DID(x) (((u64)((x) & 0xffff)) << 32)

#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_IVT (((u64)1) << 63)

/* Invalidate-address register fields: 4K-aligned address and hint. */
#define DMA_TLB_IVA_ADDR(x) ((((u64)(x)) >> 12) << 12)
#define DMA_TLB_IVA_HINT(x) ((((u64)(x)) & 1) << 6)
/* Global command register (DMAR_GCMD_REG) bits. */
#define DMA_GCMD_TE    ((u64)1 << 31) /* translation enable */
#define DMA_GCMD_SRTP  ((u64)1 << 30) /* set root-table pointer */
#define DMA_GCMD_SFL   ((u64)1 << 29) /* set fault log */
#define DMA_GCMD_EAFL  ((u64)1 << 28) /* enable advanced fault logging */
#define DMA_GCMD_WBF   ((u64)1 << 27) /* write-buffer flush */
#define DMA_GCMD_QIE   ((u64)1 << 26) /* queued invalidation enable */
#define DMA_GCMD_IRE   ((u64)1 << 25) /* interrupt remapping enable */
#define DMA_GCMD_SIRTP ((u64)1 << 24) /* set intr remap table pointer */
#define DMA_GCMD_CFI   ((u64)1 << 23) /* compatibility format interrupt */
/* Global status register (DMAR_GSTS_REG) bits. */
#define DMA_GSTS_TES    ((u64)1 << 31) /* translation enable status */
#define DMA_GSTS_RTPS   ((u64)1 << 30) /* root-table pointer status */
#define DMA_GSTS_FLS    ((u64)1 << 29) /* fault log status */
#define DMA_GSTS_AFLS   ((u64)1 << 28) /* advanced fault logging status */
#define DMA_GSTS_WBFS   ((u64)1 << 27) /* write-buffer flush status */
#define DMA_GSTS_QIES   ((u64)1 << 26) /* queued invalidation status */
#define DMA_GSTS_IRES   ((u64)1 << 25) /* interrupt remapping status */
#define DMA_GSTS_SIRTPS ((u64)1 << 24) /* intr remap table pointer status */
#define DMA_GSTS_CFIS   ((u64)1 << 23) /* compat format intr status */

/* Protected memory enable register (DMAR_PMEN_REG) bits. */
#define DMA_PMEN_EPM ((u32)1 << 31) /* enable protected memory */
#define DMA_PMEN_PRS ((u32)1 << 0)  /* protected region status */
/* Context command register (DMAR_CCMD_REG) fields. */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
/* Argument parenthesized: the original's bare '& x' misgrouped for
 * arguments containing '|'. */
#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & (x))
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

#define DMA_CCMD_CAIG_MASK(x) (((u64)(x)) & ((u64) 0x3 << 59))
/* Fault event control register (DMAR_FECTL_REG). */
#define DMA_FECTL_IM (((u64)1) << 31) /* interrupt mask */

/* Fault status register (DMAR_FSTS_REG). */
#define DMA_FSTS_PFO ((u64)1 << 0) /* primary fault overflow */
#define DMA_FSTS_PPF ((u64)1 << 1) /* primary pending fault */
#define DMA_FSTS_AFO ((u64)1 << 2) /* advanced fault overflow */
#define DMA_FSTS_APF ((u64)1 << 3) /* advanced pending fault */
#define DMA_FSTS_IQE ((u64)1 << 4) /* invalidation queue error */
#define DMA_FSTS_ICE ((u64)1 << 5) /* invalidation completion error */
#define DMA_FSTS_ITE ((u64)1 << 6) /* invalidation time-out error */
/*
 * Mask of all fault bits.  Parenthesized so that expressions such as
 * "fsts & DMA_FSTS_FAULTS" group correctly; the unparenthesized
 * original expanded to (fsts & PFO) | PPF | ... .
 */
#define DMA_FSTS_FAULTS (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | \
                         DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | \
                         DMA_FSTS_ITE)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* Fault recording registers (FRCD_REG), 32-bit access; arguments are
 * parenthesized (the originals expanded them bare). */
#define DMA_FRCD_F (((u64)1) << 31) /* fault record valid */
#define dma_frcd_type(d) (((d) >> 30) & 1)
#define dma_frcd_fault_reason(c) ((c) & 0xff)
#define dma_frcd_source_id(c) ((c) & 0xffff)
/* Faulting page address from the low 64 bits: bits 12 and up. */
#define dma_frcd_page_addr(d) ((d) & (((u64)-1) << 12))
/*
 * Root table entry (128 bits), one per PCI bus:
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
    u64 val;   /* present bit + 4K-aligned context-table pointer */
    u64 rsvd1; /* upper 64 bits, reserved */
};
#define root_present(root) ((root).val & 1)
#define set_root_present(root) do {(root).val |= 1;} while(0)
/* 4K-aligned machine address of this bus's context table. */
#define get_context_addr(root) ((root).val & PAGE_MASK_4K)
/*
 * NOTE(review): ORs the new address in without clearing a previous one;
 * presumably callers start from a zeroed entry — verify at call sites.
 */
#define set_root_value(root, value) \
    do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
/*
 * Context table entry (128 bits), one per (bus, devfn).
 * lo: bit 0 present, bit 1 fault disable, bits 2-3 translation type,
 *     bits 12-63 address root (page-table pointer).
 * hi: bits 0-2 address width, bits 8-23 domain id.
 */
struct context_entry {
    u64 lo;
    u64 hi;
};
#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
#define context_address_width(c) ((c).hi & 7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))

#define context_set_present(c) do {(c).lo |= 1;} while(0)
#define context_clear_present(c) do {(c).lo &= ~1;} while(0)
/* Clears the fault-disable bit (bit 1) while preserving bit 0 and bits >= 2. */
#define context_set_fault_enable(c) \
    do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)

/* Clears bits 2-3, then installs the low two bits of val there. */
#define context_set_translation_type(c, val) do { \
    (c).lo &= (((u64)-1) << 4) | 3; \
    (c).lo |= (val & 3) << 2; \
} while(0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THRU 2

/* Keeps only bits 0-11, then ORs in the 4K-aligned table address. */
#define context_set_address_root(c, val) \
    do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
/*
 * NOTE(review): 0xfffffff8 is a 32-bit constant, so promoting it to u64
 * leaves the upper 32 bits zero and this mask also clears bits 32-63 of
 * hi — harmless only if those bits are never set first; confirm.
 */
#define context_set_address_width(c, val) \
    do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
/* Page table handling: each level translates 9 address bits. */
#define LEVEL_STRIDE (9)
#define LEVEL_MASK ((1 << LEVEL_STRIDE) - 1)
#define PTE_NUM (1 << LEVEL_STRIDE)
/* AGAW/level mapping: level n covers 30 + (n-2)*9 address bits. */
#define level_to_agaw(val) ((val) - 2)
#define agaw_to_level(val) ((val) + 2)
#define agaw_to_width(val) (30 + (val) * LEVEL_STRIDE)
#define width_to_agaw(w) (((w) - 30) / LEVEL_STRIDE)
/* Bit position of the first address bit translated at level l. */
#define level_to_offset_bits(l) (12 + ((l) - 1) * LEVEL_STRIDE)
/* 9-bit table index for addr at the given level. */
#define address_level_offset(addr, level) \
    (((addr) >> level_to_offset_bits(level)) & LEVEL_MASK)
#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
/* u64 shift: level_size(4) is 1 << 39, which overflows a 32-bit int. */
#define level_size(l) (((u64)1) << level_to_offset_bits(l))
#define align_to_level(addr, l) (((addr) + level_size(l) - 1) & level_mask(l))
/*
 * DMA page-table entry:
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
    u64 val;
};
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11) /* snoop behaviour control */
#define dma_clear_pte(p) do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0)
#define dma_set_pte_superpage(p) do {(p).val |= (1 << 7);} while(0)
#define dma_set_pte_snp(p) do {(p).val |= DMA_PTE_SNP;} while(0)
/* Replace the R/W bits (1:0) with the low two bits of prot. */
#define dma_set_pte_prot(p, prot) \
    do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
/* NOTE(review): ORs the address in without clearing a previous one. */
#define dma_set_pte_addr(p, addr) do {\
    (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
/* An entry is present iff it is readable or writable. */
#define dma_pte_present(p) (((p).val & 3) != 0)
/* Interrupt remapping table entry (128 bits). */
struct iremap_entry {
    union {
        u64 lo_val;
        struct {
            u64 p      : 1,  /* present */
                fpd    : 1,  /* fault processing disable */
                dm     : 1,  /* destination mode */
                rh     : 1,  /* redirection hint */
                tm     : 1,  /* trigger mode */
                dlm    : 3,  /* delivery mode */
                avail  : 4,  /* available for software */
                res_1  : 4,
                vector : 8,
                res_2  : 8,
                dst    : 32; /* destination APIC id */
        }lo;
    };
    union {
        u64 hi_val;
        struct {
            u64 sid    : 16, /* requester source id */
                sq     : 2,  /* source-id qualifier */
                svt    : 2,  /* source validation type */
                res_1  : 44;
        }hi;
    };
};
#define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
/*
 * NOTE(review): these apply integer operators to (v).lo, which is the
 * bit-field struct, not the u64 lo_val — they cannot compile if expanded
 * as written; presumably (v).lo_val was intended.  Verify against callers.
 */
#define iremap_present(v) ((v).lo & 1)
#define iremap_fault_disable(v) (((v).lo >> 1) & 1)

#define iremap_set_present(v) do {(v).lo |= 1;} while(0)
#define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)
/*
 * Queued invalidation descriptor (128 bits).  Which union arm applies
 * is selected by the low 4 'type' bits (see TYPE_INVAL_* below).
 */
struct qinval_entry {
    union {
        struct {
            u64 lo;
            u64 hi;
        }val;
        /* context-cache invalidate descriptor (TYPE_INVAL_CONTEXT) */
        struct {
            struct {
                u64 type  : 4,
                    granu : 2,   /* invalidation granularity */
                    res_1 : 10,
                    did   : 16,  /* domain id */
                    sid   : 16,  /* source id */
                    fm    : 2,   /* function mask */
                    res_2 : 14;
            }lo;
            struct {
                u64 res;
            }hi;
        }cc_inv_dsc;
        /* IOTLB invalidate descriptor (TYPE_INVAL_IOTLB) */
        struct {
            struct {
                u64 type  : 4,
                    granu : 2,
                    dw    : 1,   /* drain writes */
                    dr    : 1,   /* drain reads */
                    res_1 : 8,
                    did   : 16,
                    res_2 : 32;
            }lo;
            struct {
                u64 am    : 6,   /* address mask (size order) */
                    ih    : 1,   /* invalidation hint */
                    res_1 : 5,
                    addr  : 52;  /* 4K-aligned address, bits 63:12 */
            }hi;
        }iotlb_inv_dsc;
        /* device-IOTLB (ATS) invalidate descriptor (TYPE_INVAL_DEVICE_IOTLB) */
        struct {
            struct {
                u64 type  : 4,
                    res_1 : 12,
                    max_invs_pend: 5, /* max invalidations pending */
                    res_2 : 11,
                    sid   : 16,
                    res_3 : 16;
            }lo;
            struct {
                u64 size  : 1,
                    res_1 : 11,
                    addr  : 52;
            }hi;
        }dev_iotlb_inv_dsc;
        /* interrupt-entry-cache invalidate descriptor (TYPE_INVAL_IEC) */
        struct {
            struct {
                u64 type  : 4,
                    granu : 1,   /* IEC_GLOBAL_INVL or IEC_INDEX_INVL */
                    res_1 : 22,
                    im    : 5,   /* index mask */
                    iidx  : 16,  /* interrupt index */
                    res_2 : 16;
            }lo;
            struct {
                u64 res;
            }hi;
        }iec_inv_dsc;
        /* invalidation-wait descriptor (TYPE_INVAL_WAIT) */
        struct {
            struct {
                u64 type  : 4,
                    iflag : 1,   /* raise interrupt on completion */
                    sw    : 1,   /* perform status write */
                    fn    : 1,   /* fence */
                    res_1 : 25,
                    sdata : 32;  /* status data to write */
            }lo;
            struct {
                u64 res_1 : 2,
                    saddr : 62;  /* status write address */
            }hi;
        }inv_wait_dsc;
    }q;
};

/* Software-side record for polling an invalidation-wait status write. */
struct poll_info {
    u64 saddr; /* status address the wait descriptor writes to */
    u32 udata; /* status data expected there */
};

#define NUM_QINVAL_PAGES 1
#define IQA_REG_QS 0 /* derived from NUM_QINVAL_PAGES per VT-d spec. */
#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
/*
 * NOTE(review): struct qinval_entry has no direct member 'lo' (it is
 * q.val.lo), so these four macros cannot compile if expanded as
 * written; verify whether they are used anywhere.
 */
#define qinval_present(v) ((v).lo & 1)
#define qinval_fault_disable(v) (((v).lo >> 1) & 1)

#define qinval_set_present(v) do {(v).lo |= 1;} while(0)
#define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)
#define RESERVED_VAL 0

/* Queued-invalidation descriptor types (qinval_entry q.*.lo.type). */
#define TYPE_INVAL_CONTEXT 0x1
#define TYPE_INVAL_IOTLB 0x2
#define TYPE_INVAL_DEVICE_IOTLB 0x3
#define TYPE_INVAL_IEC 0x4
#define TYPE_INVAL_WAIT 0x5

/*
 * Invalidation-wait descriptor flag values.
 * NOTE(review): NOTIFY_TYPE_POLL and NOTIFY_TYPE_INTR share the value 1;
 * confirm against callers whether INTR was meant to be distinct.
 */
#define NOTIFY_TYPE_POLL 1
#define NOTIFY_TYPE_INTR 1
#define INTERRUTP_FLAG 1 /* historical misspelling kept for compatibility */
#define INTERRUPT_FLAG INTERRUTP_FLAG /* correctly spelled alias */
#define STATUS_WRITE 1
#define FENCE_FLAG 1

/* Interrupt-entry-cache invalidation granularity. */
#define IEC_GLOBAL_INVL 0
#define IEC_INDEX_INVL 1
#define IRTA_REG_EIME_SHIFT 11
#define IRTA_REG_TABLE_SIZE 7 /* 4k page = 256 * 16-byte entries:
                                 2^(IRTA_REG_TABLE_SIZE + 1) = 256 */

#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_IOMMU_REGS 0xc0
/* ACPI-discovered DMAR unit lists (populated during DMAR table parsing). */
extern struct list_head acpi_drhd_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;

/* Per-IOMMU queued-invalidation (QI) state. */
struct qi_ctrl {
    u64 qinval_maddr;  /* queue invalidation page machine address */
    int qinval_index;  /* queue invalidation index */
    spinlock_t qinval_lock;      /* lock for queue invalidation page */
    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
    volatile u32 qinval_poll_status; /* used by poll method to sync */
};

/* Per-IOMMU interrupt-remapping state. */
struct ir_ctrl {
    u64 iremap_maddr;   /* interrupt remap table machine address */
    int iremap_index;   /* interrupt remap index */
    spinlock_t iremap_lock; /* lock for irq remapping table */
};

/*
 * Flush hooks.  The iommu argument is opaque here (void *); presumably
 * a struct iommu pointer — see the implementations for the actual type.
 */
struct iommu_flush {
    int (*context)(void *iommu, u16 did, u16 source_id,
                   u8 function_mask, u64 type, int non_present_entry_flush);
    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
                 u64 type, int flush_non_present_entry, int flush_dev_iotlb);
};

/* Intel-specific per-IOMMU state bundle. */
struct intel_iommu {
    struct qi_ctrl qi_ctrl;
    struct ir_ctrl ir_ctrl;
    struct iommu_flush flush;
};
467 #endif