ia64/xen-unstable

annotate xen/include/asm-x86/hvm/svm/amd-iommu-proto.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
The changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the IA64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 78962f85c562
children
rev   line source
keir@15957 1 /*
keir@15957 2 * Copyright (C) 2007 Advanced Micro Devices, Inc.
keir@15957 3 * Author: Leo Duran <leo.duran@amd.com>
keir@15957 4 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
keir@15957 5 *
keir@15957 6 * This program is free software; you can redistribute it and/or modify
keir@15957 7 * it under the terms of the GNU General Public License as published by
keir@15957 8 * the Free Software Foundation; either version 2 of the License, or
keir@15957 9 * (at your option) any later version.
keir@15957 10 *
keir@15957 11 * This program is distributed in the hope that it will be useful,
keir@15957 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
keir@15957 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
keir@15957 14 * GNU General Public License for more details.
keir@15957 15 *
keir@15957 16 * You should have received a copy of the GNU General Public License
keir@15957 17 * along with this program; if not, write to the Free Software
keir@15957 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
keir@15957 19 */
keir@15957 20
keir@15957 21 #ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
keir@15957 22 #define _ASM_X86_64_AMD_IOMMU_PROTO_H
keir@15957 23
keir@17153 24 #include <xen/sched.h>
keir@15957 25 #include <asm/amd-iommu.h>
keir@19024 26 #include <xen/domain_page.h>
keir@15957 27
/* Iterate over every discovered AMD IOMMU on the global amd_iommu_head list. */
keir@15957 28 #define for_each_amd_iommu(amd_iommu) \
keir@15957 29 list_for_each_entry(amd_iommu, \
keir@15957 30 &amd_iommu_head, list)
keir@15957 31
/* Mask selecting the low 32 bits of a 64-bit address (32-bit DMA limit). */
keir@15957 32 #define DMA_32BIT_MASK 0x00000000ffffffffULL
/* Round 'addr' up to the next page boundary. */
keir@15957 33 #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
keir@15957 34
/*
 * AMD_IOV-prefixed diagnostics: when AMD_IOV_DEBUG is defined at build time
 * these expand to printk() at the corresponding XENLOG level (errors also
 * carry __FILE__/__LINE__); otherwise they compile away to nothing.
 */
keir@17486 35 #ifdef AMD_IOV_DEBUG
keir@17486 36 #define amd_iov_info(fmt, args...) \
keir@17486 37 printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
keir@17486 38 #define amd_iov_warning(fmt, args...) \
keir@17486 39 printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
keir@17486 40 #define amd_iov_error(fmt, args...) \
keir@17486 41 printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
keir@17486 42 #else
keir@17486 43 #define amd_iov_info(fmt, args...)
keir@17486 44 #define amd_iov_warning(fmt, args...)
keir@17486 45 #define amd_iov_error(fmt, args...)
keir@17486 46 #endif
keir@17486 47
keir@15957 48 /* amd-iommu-detect functions */
keir@18224 49 int __init amd_iommu_get_ivrs_dev_entries(void);
keir@18224 50 int __init amd_iommu_detect_one_acpi(void *ivhd);
keir@18224 51 int __init amd_iommu_detect_acpi(void);
keir@15957 52
keir@15957 53 /* amd-iommu-init functions */
keir@18224 54 int __init amd_iommu_init(void);
keir@18224 55 int __init amd_iommu_init_one(struct amd_iommu *iommu);
keir@18224 56 int __init amd_iommu_update_ivrs_mapping_acpi(void);
keir@18224 57 void __init amd_iommu_init_cleanup(void);
keir@18224 58 int __init amd_iommu_setup_shared_tables(void);
keir@15957 59
/* mapping functions -- NOTE(review): gfn/mfn appear to be page frame numbers,
 * not byte addresses; confirm against the definitions in amd_iommu_map.c. */
keir@15957 60 /* mapping functions */
keir@17064 61 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
keir@15957 62 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
keir@19024 63 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
keir@17153 64 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
keir@17153 65 unsigned long phys_addr, unsigned long size, int iw, int ir);
keir@17390 66 int amd_iommu_sync_p2m(struct domain *d);
keir@18858 67 void invalidate_all_iommu_pages(struct domain *d);
keir@15957 68
keir@15957 69 /* device table functions */
keir@18037 70 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
keir@17153 71 u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
keir@17064 72 int amd_iommu_is_dte_page_translation_valid(u32 *entry);
/* NOTE(review): parameter name "devic_id" looks like a typo for "device_id"
 * (cf. invalidate_interrupt_table below); harmless in a prototype. */
keir@19024 73 void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
keir@15957 74
keir@15957 75 /* send cmd to iommu */
keir@15957 76 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
keir@17064 77 void flush_command_buffer(struct amd_iommu *iommu);
keir@15957 78
keir@15957 79 /* find iommu for bdf */
keir@15957 80 struct amd_iommu *find_iommu_for_device(int bus, int devfn);
keir@15957 81
keir@18037 82 /*interrupt remapping */
keir@18224 83 int __init amd_iommu_setup_intremap_table(void);
keir@18224 84 int __init deallocate_intremap_table(void);
keir@18037 85 void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
/* Hooks to keep the interrupt-remapping table in sync with IOAPIC RTE and
 * MSI message writes, and to read back the untranslated values. */
keir@18037 86 void amd_iommu_ioapic_update_ire(
keir@18037 87 unsigned int apic, unsigned int reg, unsigned int value);
keir@18037 88 void amd_iommu_msi_msg_update_ire(
keir@18037 89 struct msi_desc *msi_desc, struct msi_msg *msg);
keir@19800 90 void amd_iommu_read_msi_from_ire(
keir@19800 91 struct msi_desc *msi_desc, struct msi_msg *msg);
keir@19800 92 unsigned int amd_iommu_read_ioapic_from_ire(
keir@19800 93 unsigned int apic, unsigned int reg);
keir@18037 94
keir@15957 95 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
keir@15957 96 {
keir@15957 97 u32 field;
keir@15957 98 field = (reg_value & mask) >> shift;
keir@15957 99 return field;
keir@15957 100 }
keir@15957 101
keir@15957 102 static inline u32 set_field_in_reg_u32(u32 field, u32 reg_value,
keir@15957 103 u32 mask, u32 shift, u32 *reg)
keir@15957 104 {
keir@15957 105 reg_value &= ~mask;
keir@15957 106 reg_value |= (field << shift) & mask;
keir@15957 107 if (reg)
keir@15957 108 *reg = reg_value;
keir@15957 109 return reg_value;
keir@15957 110 }
keir@15957 111
keir@17153 112 static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
keir@17153 113 {
keir@17153 114 u8 field;
keir@17153 115 field = (value & mask) >> shift;
keir@17153 116 return field;
keir@17153 117 }
keir@17153 118
keir@17153 119 static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
keir@17153 120 {
keir@17153 121 return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
keir@17153 122 }
keir@17153 123
keir@19024 124 static inline struct page_info* alloc_amd_iommu_pgtable(void)
keir@19024 125 {
keir@19024 126 struct page_info *pg;
keir@19024 127 void *vaddr;
keir@19024 128
keir@19024 129 pg = alloc_domheap_page(NULL, 0);
keir@19024 130 vaddr = map_domain_page(page_to_mfn(pg));
keir@19024 131 if ( !vaddr )
keir@19024 132 return 0;
keir@19024 133 memset(vaddr, 0, PAGE_SIZE);
keir@19024 134 unmap_domain_page(vaddr);
keir@19024 135 return pg;
keir@19024 136 }
keir@19024 137
keir@19024 138 static inline void free_amd_iommu_pgtable(struct page_info *pg)
keir@19024 139 {
keir@19024 140 if ( pg != 0 )
keir@19024 141 free_domheap_page(pg);
keir@19024 142 }
keir@19024 143
/* Allocate 2^order contiguous xenheap pages for IOMMU tables
 * (command buffer, event log, device table).  Returns NULL on failure. */
static inline void* __alloc_amd_iommu_tables(int order)
{
    return alloc_xenheap_pages(order, 0);
}
keir@19024 150
/* Free a table allocation made with __alloc_amd_iommu_tables();
 * 'order' must match the order used at allocation time. */
static inline void __free_amd_iommu_tables(void *table, int order)
{
    free_xenheap_pages(table, order);
}
keir@19024 155
keir@15957 156 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */