ia64/xen-unstable

view xen/include/asm-x86/hvm/svm/amd-iommu-proto.h @ 19107:696351cde9a4

Allow memflags to be specified to alloc_xenheap_pages().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 28 16:58:41 2009 +0000 (2009-01-28)
parents 1dfc48a8c361
children 78962f85c562
line source
1 /*
2 * Copyright (C) 2007 Advanced Micro Devices, Inc.
3 * Author: Leo Duran <leo.duran@amd.com>
4 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
21 #ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
22 #define _ASM_X86_64_AMD_IOMMU_PROTO_H
24 #include <xen/sched.h>
25 #include <asm/amd-iommu.h>
26 #include <xen/domain_page.h>
/* Iterate over every AMD IOMMU on the global amd_iommu_head list. */
#define for_each_amd_iommu(amd_iommu) \
    list_for_each_entry(amd_iommu, \
        &amd_iommu_head, list)

/* Mask selecting the low 32 bits of a DMA address. */
#define DMA_32BIT_MASK 0x00000000ffffffffULL
/* Round 'addr' up to the next page boundary. */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

/*
 * Logging helpers, prefixed with "AMD_IOV: ".  They compile to nothing
 * unless AMD_IOV_DEBUG is defined; amd_iov_error() also records the
 * source file and line of the call site.
 */
#ifdef AMD_IOV_DEBUG
#define amd_iov_info(fmt, args...) \
    printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
#define amd_iov_warning(fmt, args...) \
    printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
#define amd_iov_error(fmt, args...) \
    printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
#else
#define amd_iov_info(fmt, args...)
#define amd_iov_warning(fmt, args...)
#define amd_iov_error(fmt, args...)
#endif
/* amd-iommu-detect functions (ACPI IVRS/IVHD table parsing) */
int __init amd_iommu_get_ivrs_dev_entries(void);
int __init amd_iommu_detect_one_acpi(void *ivhd);
int __init amd_iommu_detect_acpi(void);

/* amd-iommu-init functions (boot-time hardware bring-up) */
int __init amd_iommu_init(void);
int __init amd_iommu_init_one(struct amd_iommu *iommu);
int __init amd_iommu_update_ivrs_mapping_acpi(void);
void __init amd_iommu_init_cleanup(void);
int __init amd_iommu_setup_shared_tables(void);

/* mapping functions (guest-frame to machine-frame translation) */
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
        unsigned long phys_addr, unsigned long size, int iw, int ir);
int amd_iommu_sync_p2m(struct domain *d);
void invalidate_all_iommu_pages(struct domain *d);

/* device table functions */
void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
        u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);

/* send cmd to iommu */
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
void flush_command_buffer(struct amd_iommu *iommu);

/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int bus, int devfn);

/* interrupt remapping */
int __init amd_iommu_setup_intremap_table(void);
int __init deallocate_intremap_table(void);
void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
void amd_iommu_ioapic_update_ire(
    unsigned int apic, unsigned int reg, unsigned int value);
void amd_iommu_msi_msg_update_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg);
91 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
92 {
93 u32 field;
94 field = (reg_value & mask) >> shift;
95 return field;
96 }
98 static inline u32 set_field_in_reg_u32(u32 field, u32 reg_value,
99 u32 mask, u32 shift, u32 *reg)
100 {
101 reg_value &= ~mask;
102 reg_value |= (field << shift) & mask;
103 if (reg)
104 *reg = reg_value;
105 return reg_value;
106 }
108 static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
109 {
110 u8 field;
111 field = (value & mask) >> shift;
112 return field;
113 }
115 static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
116 {
117 return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
118 }
120 static inline struct page_info* alloc_amd_iommu_pgtable(void)
121 {
122 struct page_info *pg;
123 void *vaddr;
125 pg = alloc_domheap_page(NULL, 0);
126 vaddr = map_domain_page(page_to_mfn(pg));
127 if ( !vaddr )
128 return 0;
129 memset(vaddr, 0, PAGE_SIZE);
130 unmap_domain_page(vaddr);
131 return pg;
132 }
134 static inline void free_amd_iommu_pgtable(struct page_info *pg)
135 {
136 if ( pg != 0 )
137 free_domheap_page(pg);
138 }
/* Allocate 2^order contiguous xenheap pages for IOMMU tables; returns
 * the virtual address of the allocation, or NULL on failure. */
static inline void *__alloc_amd_iommu_tables(int order)
{
    return alloc_xenheap_pages(order, 0);
}
/* Free 2^order xenheap pages obtained from __alloc_amd_iommu_tables(). */
static inline void __free_amd_iommu_tables(void *table, int order)
{
    free_xenheap_pages(table, order);
}
152 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */