ia64/xen-unstable

xen/include/asm-x86/hvm/svm/amd-iommu-proto.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral iommu interface. The
reason is that, as of changeset 19732, a single global flag
"iommu_enabled" controls iommu enablement for both vtd and amd
systems, so vtd and amd systems need different code paths once this
flag is turned on. Also, the early check of "iommu_enabled" in
iommu_setup() is removed so that iommu functionality is not disabled
on amd systems.
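
For illustration only, the run-time dispatch described above could look
like the sketch below; apart from "iommu_enabled", the function names
are hypothetical stand-ins, not the actual functions this changeset adds.

    int generic_iommu_op(void)                  /* hypothetical name */
    {
        if ( !iommu_enabled )
            return 0;
        /* Same global flag, different code path per vendor. */
        if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
            return amd_iommu_op();              /* hypothetical AMD path */
        return vtd_op();                        /* hypothetical VT-d path */
    }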

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 696351cde9a4

/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
#define _ASM_X86_64_AMD_IOMMU_PROTO_H

#include <xen/sched.h>
#include <asm/amd-iommu.h>
#include <xen/domain_page.h>

#define for_each_amd_iommu(amd_iommu) \
    list_for_each_entry(amd_iommu, \
        &amd_iommu_head, list)
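
/*
 * Example (illustrative): iterate over every IOMMU discovered from the
 * ACPI IVRS table; amd_iommu_head is the global list they are kept on.
 *
 *     struct amd_iommu *iommu;
 *     for_each_amd_iommu ( iommu )
 *         flush_command_buffer(iommu);
 */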

#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

#ifdef AMD_IOV_DEBUG
#define amd_iov_info(fmt, args...) \
    printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
#define amd_iov_warning(fmt, args...) \
    printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
#define amd_iov_error(fmt, args...) \
    printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
#else
#define amd_iov_info(fmt, args...)
#define amd_iov_warning(fmt, args...)
#define amd_iov_error(fmt, args...)
#endif
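
/*
 * Example (illustrative): amd_iov_error("command buffer full\n") prints
 * a XENLOG_ERR-tagged message with file and line information when
 * AMD_IOV_DEBUG is defined, and compiles to nothing otherwise.
 */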

/* amd-iommu-detect functions */
int __init amd_iommu_get_ivrs_dev_entries(void);
int __init amd_iommu_detect_one_acpi(void *ivhd);
int __init amd_iommu_detect_acpi(void);

/* amd-iommu-init functions */
int __init amd_iommu_init(void);
int __init amd_iommu_init_one(struct amd_iommu *iommu);
int __init amd_iommu_update_ivrs_mapping_acpi(void);
void __init amd_iommu_init_cleanup(void);
int __init amd_iommu_setup_shared_tables(void);

/* mapping functions */
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
        unsigned long phys_addr, unsigned long size, int iw, int ir);
int amd_iommu_sync_p2m(struct domain *d);
void invalidate_all_iommu_pages(struct domain *d);

/* device table functions */
void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
        u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 device_id);

/* send cmd to iommu */
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
void flush_command_buffer(struct amd_iommu *iommu);

/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int bus, int devfn);
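
/*
 * Example (illustrative): find the IOMMU that serves PCI device
 * 02:00.0; PCI_DEVFN() is the usual slot/function encoding helper.
 *
 *     struct amd_iommu *iommu = find_iommu_for_device(2, PCI_DEVFN(0, 0));
 */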

/* interrupt remapping */
int __init amd_iommu_setup_intremap_table(void);
int __init deallocate_intremap_table(void);
void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
void amd_iommu_ioapic_update_ire(
    unsigned int apic, unsigned int reg, unsigned int value);
void amd_iommu_msi_msg_update_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg);
void amd_iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg);
unsigned int amd_iommu_read_ioapic_from_ire(
    unsigned int apic, unsigned int reg);

static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
{
    u32 field;
    field = (reg_value & mask) >> shift;
    return field;
}

static inline u32 set_field_in_reg_u32(u32 field, u32 reg_value,
        u32 mask, u32 shift, u32 *reg)
{
    reg_value &= ~mask;
    reg_value |= (field << shift) & mask;
    if ( reg )
        *reg = reg_value;
    return reg_value;
}
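
/*
 * Usage sketch: read-modify-write a single field of a 32-bit register.
 * The MASK/SHIFT constants follow the naming convention of
 * amd-iommu-defs.h and are shown here for illustration.
 *
 *     u32 ctrl = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 *     set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, ctrl,
 *                          IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
 *                          IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &ctrl);
 *     writel(ctrl, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 */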

static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
{
    u8 field;
    field = (value & mask) >> shift;
    return field;
}

static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
{
    return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}
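
/*
 * Example: with 4K pages, addr = 0x10ff0 and size = 0x20 cover bytes
 * 0x10ff0 .. 0x1100f, which straddle a page boundary, so
 * region_to_pages(0x10ff0, 0x20) returns 2.
 */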

static inline struct page_info *alloc_amd_iommu_pgtable(void)
{
    struct page_info *pg;
    void *vaddr;

    pg = alloc_domheap_page(NULL, 0);
    if ( pg == NULL )
        return NULL;

    /* Hand out page table pages zeroed. */
    vaddr = map_domain_page(page_to_mfn(pg));
    memset(vaddr, 0, PAGE_SIZE);
    unmap_domain_page(vaddr);
    return pg;
}

static inline void free_amd_iommu_pgtable(struct page_info *pg)
{
    if ( pg != NULL )
        free_domheap_page(pg);
}
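
/*
 * Usage sketch (illustrative): callers get a zeroed page table page and
 * release it through the matching helper:
 *
 *     struct page_info *pg = alloc_amd_iommu_pgtable();
 *     if ( pg == NULL )
 *         return -ENOMEM;
 *     ...
 *     free_amd_iommu_pgtable(pg);
 */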

static inline void *__alloc_amd_iommu_tables(int order)
{
    void *buf;
    buf = alloc_xenheap_pages(order, 0);
    return buf;
}

static inline void __free_amd_iommu_tables(void *table, int order)
{
    free_xenheap_pages(table, order);
}

#endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */