ia64/xen-unstable

changeset 17478:837ea1f0aa8a

AMD IOMMU: Add event logging support.

An MSI interrupt is triggered on an IO page fault, and the hardware
automatically writes the fault address into an in-memory event log
buffer. This patch handles the MSI interrupt simply by reading the
event log entry and printing the parsed result.
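
In outline, the event log is a ring buffer: hardware advances the tail
pointer as it logs events, and software advances the head pointer as it
consumes them. A minimal consumer sketch (using the Xen u32 typedef and
the 16-byte, 4-u32 entry size this patch reads; read_one_event() and its
parameters are illustrative names, not part of the patch):

    /* Sketch of consuming one ring entry; hypothetical helper.  The
     * caller is expected to write the updated head index back to the
     * MMIO head register afterwards, as the patch below does. */
    static int read_one_event(u32 *ring, u32 num_entries,
                              u32 *head, u32 tail, u32 event[4])
    {
        int i;

        if ( tail == *head )            /* nothing new logged */
            return -1;

        for ( i = 0; i < 4; i++ )       /* one entry is 4 u32s (16 bytes) */
            event[i] = ring[*head * 4 + i];

        if ( ++*head == num_entries )   /* wrap around the ring */
            *head = 0;

        return 0;
    }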

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Apr 16 13:43:23 2008 +0100 (2008-04-16)
parents da4042899fd2
children defbab4dba1a
files xen/drivers/passthrough/amd/iommu_detect.c xen/drivers/passthrough/amd/iommu_init.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/include/asm-x86/amd-iommu.h xen/include/asm-x86/hvm/svm/amd-iommu-defs.h xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_detect.c	Wed Apr 16 13:40:46 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_detect.c	Wed Apr 16 13:43:23 2008 +0100
     1.3 @@ -85,6 +85,45 @@ int __init get_iommu_last_downstream_bus
     1.4      return 0;
     1.5  }
     1.6  
     1.7 +static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
     1.8 +            struct amd_iommu *iommu)
     1.9 +{
    1.10 +    int cap_ptr, cap_id;
    1.11 +    u32 cap_header;
    1.12 +    u16 control;
    1.13 +    int count = 0;
    1.14 +
    1.15 +    cap_ptr = pci_conf_read8(bus, dev, func,
    1.16 +            PCI_CAPABILITY_LIST);
    1.17 +
    1.18 +    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
    1.19 +        count < PCI_MAX_CAP_BLOCKS )
    1.20 +    {
    1.21 +        cap_ptr &= PCI_CAP_PTR_MASK;
    1.22 +        cap_header = pci_conf_read32(bus, dev, func, cap_ptr);
    1.23 +        cap_id = get_field_from_reg_u32(cap_header,
    1.24 +                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
    1.25 +
    1.26 +        if ( cap_id == PCI_CAP_ID_MSI )
    1.27 +        {
    1.28 +            iommu->msi_cap = cap_ptr;
    1.29 +            break;
    1.30 +        }
    1.31 +        cap_ptr = get_field_from_reg_u32(cap_header,
    1.32 +                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
    1.33 +        count++;
    1.34 +    }
    1.35 +
    1.36 +    if ( !iommu->msi_cap )
    1.37 +        return -ENODEV;
    1.38 +
     1.39 +    dprintk(XENLOG_INFO, "AMD IOMMU: Found MSI capability block\n");
    1.40 +    control = pci_conf_read16(bus, dev, func,
    1.41 +            iommu->msi_cap + PCI_MSI_FLAGS);
    1.42 +    iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
    1.43 +    return 0;
    1.44 +}
    1.45 +
    1.46  int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
    1.47                                    struct amd_iommu *iommu)
    1.48  {
    1.49 @@ -133,6 +172,8 @@ int __init get_iommu_capabilities(u8 bus
    1.50      iommu->msi_number = get_field_from_reg_u32(
    1.51          misc_info, PCI_CAP_MSI_NUMBER_MASK, PCI_CAP_MSI_NUMBER_SHIFT);
    1.52  
    1.53 +    get_iommu_msi_capabilities(bus, dev, func, iommu);
    1.54 +
    1.55      return 0;
    1.56  }
    1.57  
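The scan in get_iommu_msi_capabilities() above is the standard PCI
capability walk: start at the byte offset stored at PCI_CAPABILITY_LIST
(0x34), mask the low bits of each pointer, and follow each header's next
pointer until the capability ID matches. A standalone restatement, where
the 0x40 lower bound and the 48-block cap are assumed stand-ins for
PCI_MIN_CAP_OFFSET and PCI_MAX_CAP_BLOCKS:

    /* Illustrative capability walk; the block count guards against a
     * corrupt next pointer cycling forever. */
    static int find_cap(u8 bus, u8 dev, u8 func, u8 wanted_id)
    {
        int pos = pci_conf_read8(bus, dev, func, PCI_CAPABILITY_LIST);
        int count = 0;

        while ( pos >= 0x40 && count++ < 48 )
        {
            u32 header = pci_conf_read32(bus, dev, func, pos & ~3);

            if ( (header & 0xff) == wanted_id )
                return pos & ~3;        /* offset of the matching block */
            pos = (header >> 8) & 0xff; /* next-pointer byte */
        }
        return 0;                       /* not found */
    }
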
     2.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Wed Apr 16 13:40:46 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Wed Apr 16 13:43:23 2008 +0100
     2.3 @@ -27,6 +27,7 @@
     2.4  #include "../pci_regs.h"
     2.5  
     2.6  extern int nr_amd_iommus;
     2.7 +static struct amd_iommu *vector_to_iommu[NR_VECTORS];
     2.8  
     2.9  int __init map_iommu_mmio_region(struct amd_iommu *iommu)
    2.10  {
    2.11 @@ -109,6 +110,33 @@ void __init register_iommu_cmd_buffer_in
    2.12      writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
    2.13  }
    2.14  
    2.15 +void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
    2.16 +{
    2.17 +    u64 addr_64, addr_lo, addr_hi;
    2.18 +    u32 power_of2_entries;
    2.19 +    u32 entry;
    2.20 +
    2.21 +    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
    2.22 +    addr_lo = addr_64 & DMA_32BIT_MASK;
    2.23 +    addr_hi = addr_64 >> 32;
    2.24 +
    2.25 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    2.26 +                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
    2.27 +                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
    2.28 +    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
    2.29 +
    2.30 +    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
    2.31 +                        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
    2.32 +
    2.33 +    set_field_in_reg_u32((u32)addr_hi, 0,
    2.34 +                        IOMMU_EVENT_LOG_BASE_HIGH_MASK,
    2.35 +                        IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
    2.36 +    set_field_in_reg_u32(power_of2_entries, entry,
    2.37 +                        IOMMU_EVENT_LOG_LENGTH_MASK,
    2.38 +                        IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
    2.39 +    writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
    2.40 +}
    2.41 +
    2.42  static void __init set_iommu_translation_control(struct amd_iommu *iommu,
    2.43                                                   int enable)
    2.44  {
    2.45 @@ -179,10 +207,281 @@ static void __init register_iommu_exclus
    2.46      writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
    2.47  }
    2.48  
    2.49 +static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
    2.50 +            int enable)
    2.51 +{
    2.52 +    u32 entry;
    2.53 +
    2.54 +    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    2.55 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
    2.56 +                         IOMMU_CONTROL_DISABLED, entry,
    2.57 +                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
    2.58 +                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
    2.59 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    2.60 +
    2.61 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
    2.62 +                         IOMMU_CONTROL_DISABLED, entry,
    2.63 +                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
    2.64 +                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
    2.65 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    2.66 +
    2.67 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
    2.68 +                         IOMMU_CONTROL_DISABLED, entry,
    2.69 +                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
    2.70 +                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
    2.71 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    2.72 +}
    2.73 +
    2.74 +static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
    2.75 +{
    2.76 +    u32 tail, head, *event_log;
    2.77 +    int i;
    2.78 +
     2.79 +    BUG_ON( !iommu || !event );
    2.80 +
    2.81 +    /* make sure there's an entry in the log */
    2.82 +    tail = get_field_from_reg_u32(
    2.83 +                readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET),
    2.84 +                IOMMU_EVENT_LOG_TAIL_MASK,
    2.85 +                IOMMU_EVENT_LOG_TAIL_SHIFT);
    2.86 +    if ( tail != iommu->event_log_head )
    2.87 +    {
    2.88 +        /* read event log entry */
    2.89 +        event_log = (u32 *)(iommu->event_log.buffer +
    2.90 +                                        (iommu->event_log_head *
    2.91 +                                        IOMMU_EVENT_LOG_ENTRY_SIZE));
    2.92 +        for ( i = 0; i < IOMMU_EVENT_LOG_U32_PER_ENTRY; i++ )
    2.93 +            event[i] = event_log[i];
    2.94 +        if ( ++iommu->event_log_head == iommu->event_log.entries )
    2.95 +            iommu->event_log_head = 0;
    2.96 +
    2.97 +        /* update head pointer */
    2.98 +        set_field_in_reg_u32(iommu->event_log_head, 0,
    2.99 +                             IOMMU_EVENT_LOG_HEAD_MASK,
   2.100 +                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
   2.101 +        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
   2.102 +        return 0;
   2.103 +    }
   2.104 +
   2.105 +    return -EFAULT;
   2.106 +}
   2.107 +
   2.108 +static void amd_iommu_msi_data_init(struct amd_iommu *iommu, int vector)
   2.109 +{
   2.110 +    u32 msi_data;
   2.111 +    u8 bus = (iommu->bdf >> 8) & 0xff;
   2.112 +    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
   2.113 +    u8 func = PCI_FUNC(iommu->bdf & 0xff);
   2.114 +
   2.115 +    msi_data = MSI_DATA_TRIGGER_EDGE |
   2.116 +        MSI_DATA_LEVEL_ASSERT |
   2.117 +        MSI_DATA_DELIVERY_FIXED |
   2.118 +        MSI_DATA_VECTOR(vector);
   2.119 +
   2.120 +    pci_conf_write32(bus, dev, func,
   2.121 +        iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
   2.122 +}
   2.123 +
   2.124 +static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
   2.125 +{
   2.126 +
   2.127 +    int bus = (iommu->bdf >> 8) & 0xff;
   2.128 +    int dev = PCI_SLOT(iommu->bdf & 0xff);
   2.129 +    int func = PCI_FUNC(iommu->bdf & 0xff);
   2.130 +
   2.131 +    u32 address_hi = 0;
   2.132 +    u32 address_lo = MSI_ADDR_HEADER |
   2.133 +            MSI_ADDR_DESTMODE_PHYS |
   2.134 +            MSI_ADDR_REDIRECTION_CPU |
   2.135 +            MSI_ADDR_DESTID_CPU(phy_cpu);
   2.136 +
   2.137 +    pci_conf_write32(bus, dev, func,
   2.138 +        iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
   2.139 +    pci_conf_write32(bus, dev, func,
   2.140 +        iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
   2.141 +}
   2.142 +
   2.143 +static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
   2.144 +{
   2.145 +    u16 control;
   2.146 +    int bus = (iommu->bdf >> 8) & 0xff;
   2.147 +    int dev = PCI_SLOT(iommu->bdf & 0xff);
   2.148 +    int func = PCI_FUNC(iommu->bdf & 0xff);
   2.149 +
   2.150 +    control = pci_conf_read16(bus, dev, func,
   2.151 +        iommu->msi_cap + PCI_MSI_FLAGS);
    2.152 +    control &= ~(1);   /* bit 0 is the MSI enable bit */
   2.153 +    if ( flag )
   2.154 +        control |= flag;
   2.155 +    pci_conf_write16(bus, dev, func,
   2.156 +        iommu->msi_cap + PCI_MSI_FLAGS, control);
   2.157 +}
   2.158 +
   2.159 +static void iommu_msi_unmask(unsigned int vector)
   2.160 +{
   2.161 +    unsigned long flags;
   2.162 +    struct amd_iommu *iommu = vector_to_iommu[vector];
   2.163 +
    2.164 +    /* FIXME: mask bits are not supported at the moment */
   2.165 +    if ( iommu->maskbit )
   2.166 +        return;
   2.167 +
   2.168 +    spin_lock_irqsave(&iommu->lock, flags);
   2.169 +    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
   2.170 +    spin_unlock_irqrestore(&iommu->lock, flags);
   2.171 +}
   2.172 +
   2.173 +static void iommu_msi_mask(unsigned int vector)
   2.174 +{
   2.175 +    unsigned long flags;
   2.176 +    struct amd_iommu *iommu = vector_to_iommu[vector];
   2.177 +
    2.178 +    /* FIXME: mask bits are not supported at the moment */
   2.179 +    if ( iommu->maskbit )
   2.180 +        return;
   2.181 +
   2.182 +    spin_lock_irqsave(&iommu->lock, flags);
   2.183 +    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
   2.184 +    spin_unlock_irqrestore(&iommu->lock, flags);
   2.185 +}
   2.186 +
   2.187 +static unsigned int iommu_msi_startup(unsigned int vector)
   2.188 +{
   2.189 +    iommu_msi_unmask(vector);
   2.190 +    return 0;
   2.191 +}
   2.192 +
   2.193 +static void iommu_msi_end(unsigned int vector)
   2.194 +{
   2.195 +    iommu_msi_unmask(vector);
   2.196 +    ack_APIC_irq();
   2.197 +}
   2.198 +
   2.199 +static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
   2.200 +{
   2.201 +    struct amd_iommu *iommu = vector_to_iommu[vector];
   2.202 +    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
   2.203 +}
   2.204 +
   2.205 +static struct hw_interrupt_type iommu_msi_type = {
   2.206 +    .typename = "AMD_IOV_MSI",
   2.207 +    .startup = iommu_msi_startup,
   2.208 +    .shutdown = iommu_msi_mask,
   2.209 +    .enable = iommu_msi_unmask,
   2.210 +    .disable = iommu_msi_mask,
   2.211 +    .ack = iommu_msi_mask,
   2.212 +    .end = iommu_msi_end,
   2.213 +    .set_affinity = iommu_msi_set_affinity,
   2.214 +};
   2.215 +
   2.216 +static void parse_event_log_entry(u32 entry[])
   2.217 +{
   2.218 +    u16 domain_id, device_id;
   2.219 +    u32 code;
   2.220 +    u64 *addr;
    2.221 +    char *event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
    2.222 +                         "IO_PAGE_FAULT",
    2.223 +                         "DEV_TABLE_HW_ERROR",
    2.224 +                         "PAGE_TABLE_HW_ERROR",
    2.225 +                         "ILLEGAL_COMMAND_ERROR",
    2.226 +                         "COMMAND_HW_ERROR",
    2.227 +                         "IOTLB_INV_TIMEOUT",
    2.228 +                         "INVALID_DEV_REQUEST"};
   2.229 +
    2.230 +    code = get_field_from_reg_u32(entry[1],
    2.231 +                                  IOMMU_EVENT_CODE_MASK,
    2.232 +                                  IOMMU_EVENT_CODE_SHIFT);
   2.233 +
   2.234 +    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST)
   2.235 +        || (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
   2.236 +    {
   2.237 +        dprintk(XENLOG_ERR, "Invalid event log entry!\n");
   2.238 +        return;
   2.239 +    }
   2.240 +
   2.241 +    if ( code == IOMMU_EVENT_IO_PAGE_FALT )
   2.242 +    {
   2.243 +        device_id = get_field_from_reg_u32(entry[0],
   2.244 +                                           IOMMU_EVENT_DEVICE_ID_MASK,
   2.245 +                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
   2.246 +        domain_id = get_field_from_reg_u32(entry[1],
   2.247 +                                           IOMMU_EVENT_DOMAIN_ID_MASK,
   2.248 +                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
    2.249 +        addr = (u64 *)(entry + 2);
   2.250 +        dprintk(XENLOG_ERR,
   2.251 +            "%s: domain = %d, device id = 0x%x, fault address = 0x%"PRIx64"\n",
   2.252 +            event_str[code-1], domain_id, device_id, *addr);
   2.253 +    }
   2.254 +}
   2.255 +
   2.256 +static void amd_iommu_page_fault(int vector, void *dev_id,
   2.257 +                             struct cpu_user_regs *regs)
   2.258 +{
   2.259 +    u32  event[4];
   2.260 +    unsigned long flags;
   2.261 +    int ret = 0;
   2.262 +    struct amd_iommu *iommu = dev_id;
   2.263 +
   2.264 +    spin_lock_irqsave(&iommu->lock, flags);
   2.265 +    ret = amd_iommu_read_event_log(iommu, event);
   2.266 +    spin_unlock_irqrestore(&iommu->lock, flags);
   2.267 +
   2.268 +    if ( ret != 0 )
   2.269 +        return;
   2.270 +    parse_event_log_entry(event);
   2.271 +}
   2.272 +
   2.273 +static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
   2.274 +{
   2.275 +    int vector, ret;
   2.276 +    unsigned long flags;
   2.277 +
    2.278 +    vector = assign_irq_vector(AUTO_ASSIGN);
    2.279 +    if ( !vector )
    2.280 +    {
    2.281 +        gdprintk(XENLOG_ERR, "AMD IOMMU: no vectors\n");
    2.282 +        return 0;
    2.283 +    }
    2.284 +
    2.285 +    vector_to_iommu[vector] = iommu;
    2.286 +
    2.287 +    /* make irq == vector */
    2.288 +    irq_vector[vector] = vector;
    2.289 +    vector_irq[vector] = vector;
    2.290 +
   2.291 +    irq_desc[vector].handler = &iommu_msi_type;
   2.292 +    ret = request_irq(vector, amd_iommu_page_fault, 0, "dmar", iommu);
   2.293 +    if ( ret )
   2.294 +    {
   2.295 +        gdprintk(XENLOG_ERR, "AMD IOMMU: can't request irq\n");
   2.296 +        return 0;
   2.297 +    }
   2.298 +
   2.299 +    spin_lock_irqsave(&iommu->lock, flags);
   2.300 +
    2.301 +    amd_iommu_msi_data_init(iommu, vector);
   2.302 +    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
   2.303 +    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
   2.304 +
   2.305 +    spin_unlock_irqrestore(&iommu->lock, flags);
   2.306 +
   2.307 +    return vector;
   2.308 +}
   2.309 +
   2.310  void __init enable_iommu(struct amd_iommu *iommu)
   2.311  {
   2.312 +    unsigned long flags;
   2.313 +
   2.314 +    set_iommu_interrupt_handler(iommu);
   2.315 +
   2.316 +    spin_lock_irqsave(&iommu->lock, flags);
   2.317 +
   2.318      register_iommu_exclusion_range(iommu);
   2.319      set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
   2.320 +    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
   2.321      set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
   2.322 +
   2.323 +    spin_unlock_irqrestore(&iommu->lock, flags);
   2.324 +
   2.325      printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
   2.326  }
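
To make the field extraction in parse_event_log_entry() above concrete:
judging from the masks and shifts it uses, a 16-byte entry carries the
device id in the low 16 bits of event[0], the domain id in the low
16 bits of event[1], the 4-bit event code in the top bits of event[1],
and a 64-bit address in event[2..3]. A hypothetical decode under those
assumptions:

    /* Assumed entry layout; the code-field shift of 28 is inferred
     * from IOMMU_EVENT_CODE_MASK/SHIFT and is an assumption here. */
    struct decoded_event {
        u16 device_id, domain_id;
        u32 code;
        u64 addr;
    };

    static void decode_event(const u32 event[4], struct decoded_event *d)
    {
        d->device_id = event[0] & 0xffff;
        d->domain_id = event[1] & 0xffff;
        d->code      = (event[1] >> 28) & 0xf;
        d->addr      = ((u64)event[3] << 32) | event[2];
    }
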
     3.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Apr 16 13:40:46 2008 +0100
     3.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Apr 16 13:43:23 2008 +0100
     3.3 @@ -29,6 +29,7 @@
     3.4  struct list_head amd_iommu_head;
     3.5  long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
     3.6  static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
     3.7 +static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;
     3.8  int nr_amd_iommus = 0;
     3.9  
    3.10  unsigned short ivrs_bdf_entries = 0;
    3.11 @@ -73,7 +74,8 @@ static void __init deallocate_iommu_tabl
    3.12  static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
    3.13  {
    3.14      deallocate_iommu_table_struct(&iommu->dev_table);
    3.15 -    deallocate_iommu_table_struct(&iommu->cmd_buffer);;
    3.16 +    deallocate_iommu_table_struct(&iommu->cmd_buffer);
    3.17 +    deallocate_iommu_table_struct(&iommu->event_log);
    3.18  }
    3.19  
    3.20  static void __init detect_cleanup(void)
    3.21 @@ -139,6 +141,20 @@ static int __init allocate_iommu_resourc
    3.22                                       "Command Buffer") != 0 )
    3.23          goto error_out;
    3.24  
    3.25 +    /* allocate 'event log' in power of 2 increments of 4K */
    3.26 +    iommu->event_log_head = 0;
    3.27 +    iommu->event_log.alloc_size =
    3.28 +        PAGE_SIZE << get_order_from_bytes(
    3.29 +            PAGE_ALIGN(amd_iommu_event_log_entries *
    3.30 +                        IOMMU_EVENT_LOG_ENTRY_SIZE));
    3.31 +
    3.32 +    iommu->event_log.entries =
    3.33 +        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;
    3.34 +
    3.35 +    if ( allocate_iommu_table_struct(&iommu->event_log,
    3.36 +                                     "Event Log") != 0 )
    3.37 +        goto error_out;
    3.38 +
    3.39      return 0;
    3.40  
    3.41   error_out:
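
The allocation above sizes the log in whole pages: with the default of
512 entries and the 16-byte entry size implied by the 4-u32 event reads
in iommu_init.c, 512 x 16 = 8192 bytes, which page-aligns to two 4 KB
pages (order 1), so alloc_size is 8192 and entries stays 512. The length
field programmed into the base register is then order + 8, since one
4 KB page holds 2^8 = 256 such entries (so the value of
IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE would be 8; that value is an
assumption here). A small sketch of the arithmetic, with the order loop
standing in for get_order_from_bytes():

    /* Worked sizing example; 16-byte entries are an assumption taken
     * from the 4-u32 event reads in this patch. */
    static unsigned int event_log_length_field(unsigned long num_entries)
    {
        unsigned long bytes = num_entries * 16;
        unsigned int order = 0;

        while ( (4096UL << order) < bytes )   /* get_order_from_bytes() */
            order++;

        return order + 8;    /* 512 entries -> 8KB -> order 1 -> 9 */
    }
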
    3.42 @@ -203,6 +219,7 @@ static int __init amd_iommu_init(void)
    3.43              goto error_out;
    3.44          register_iommu_dev_table_in_mmio_space(iommu);
    3.45          register_iommu_cmd_buffer_in_mmio_space(iommu);
    3.46 +        register_iommu_event_log_in_mmio_space(iommu);
    3.47  
    3.48          spin_unlock_irqrestore(&iommu->lock, flags);
    3.49      }
    3.50 @@ -224,11 +241,9 @@ static int __init amd_iommu_init(void)
    3.51  
    3.52      for_each_amd_iommu ( iommu )
    3.53      {
    3.54 -        spin_lock_irqsave(&iommu->lock, flags);
    3.55          /* enable IOMMU translation services */
    3.56          enable_iommu(iommu);
    3.57          nr_amd_iommus++;
    3.58 -        spin_unlock_irqrestore(&iommu->lock, flags);
    3.59      }
    3.60  
    3.61      amd_iommu_enabled = 1;
    3.62 @@ -288,7 +303,7 @@ void amd_iommu_setup_domain_device(
    3.63          sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
    3.64          dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
    3.65          amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
    3.66 -                                      req_id, sys_mgt, dev_ex,
    3.67 +                                      hd->domain_id, sys_mgt, dev_ex,
    3.68                                        hd->paging_mode);
    3.69  
    3.70          invalidate_dev_table_entry(iommu, req_id);
     4.1 --- a/xen/include/asm-x86/amd-iommu.h	Wed Apr 16 13:40:46 2008 +0100
     4.2 +++ b/xen/include/asm-x86/amd-iommu.h	Wed Apr 16 13:43:23 2008 +0100
     4.3 @@ -79,6 +79,9 @@ struct amd_iommu {
     4.4      int exclusion_allow_all;
     4.5      uint64_t exclusion_base;
     4.6      uint64_t exclusion_limit;
     4.7 +
     4.8 +    int msi_cap;
     4.9 +    int maskbit;
    4.10  };
    4.11  
    4.12  struct ivrs_mappings {
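
The two new fields cache the offset of the MSI capability block and
whether the function advertises per-vector masking; maskbit is filled
from bit 8 (PCI_MSI_FLAGS_MASKBIT, 0x0100) of the MSI control word, and
the mask/unmask handlers in iommu_init.c bail out early when it is set,
since mask-bit support is left as a FIXME. A sketch of the test,
assuming the standard MSI control-word layout:

    /* Per-vector masking test; PCI_MSI_FLAGS_MASKBIT is bit 8 of the
     * MSI message control word. */
    u16 control = pci_conf_read16(bus, dev, func, msi_cap + PCI_MSI_FLAGS);
    int maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
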
     5.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h	Wed Apr 16 13:40:46 2008 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h	Wed Apr 16 13:43:23 2008 +0100
     5.3 @@ -35,6 +35,9 @@
     5.4  /* IOMMU Command Buffer entries: in power of 2 increments, minimum of 256 */
     5.5  #define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES	512
     5.6  
     5.7 +/* IOMMU Event Log entries: in power of 2 increments, minimum of 256 */
     5.8 +#define IOMMU_EVENT_LOG_DEFAULT_ENTRIES     512
     5.9 +
    5.10  #define BITMAP_ENTRIES_PER_BYTE		8
    5.11  
    5.12  #define PTE_PER_TABLE_SHIFT		9
    5.13 @@ -304,6 +307,11 @@
    5.14  #define IOMMU_EVENT_IOTLB_INV_TIMEOUT		0x7
    5.15  #define IOMMU_EVENT_INVALID_DEV_REQUEST		0x8
    5.16  
    5.17 +#define IOMMU_EVENT_DOMAIN_ID_MASK           0x0000FFFF
    5.18 +#define IOMMU_EVENT_DOMAIN_ID_SHIFT          0
    5.19 +#define IOMMU_EVENT_DEVICE_ID_MASK           0x0000FFFF
    5.20 +#define IOMMU_EVENT_DEVICE_ID_SHIFT          0
    5.21 +
    5.22  /* Control Register */
    5.23  #define IOMMU_CONTROL_MMIO_OFFSET			0x18
    5.24  #define IOMMU_CONTROL_TRANSLATION_ENABLE_MASK		0x00000001
    5.25 @@ -427,4 +435,33 @@
    5.26  #define IOMMU_IO_READ_ENABLED           1
    5.27  #define HACK_BIOS_SETTINGS                  0
    5.28  
    5.29 +/* MSI interrupt */
    5.30 +#define MSI_DATA_VECTOR_SHIFT       0
    5.31 +#define MSI_DATA_VECTOR(v)      (((u8)v) << MSI_DATA_VECTOR_SHIFT)
    5.32 +
    5.33 +#define MSI_DATA_DELIVERY_SHIFT     8
    5.34 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
    5.35 +#define MSI_DATA_DELIVERY_LOWPRI    (1 << MSI_DATA_DELIVERY_SHIFT)
    5.36 +
    5.37 +#define MSI_DATA_LEVEL_SHIFT        14
    5.38 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
    5.39 +#define MSI_DATA_LEVEL_ASSERT   (1 << MSI_DATA_LEVEL_SHIFT)
    5.40 +
    5.41 +#define MSI_DATA_TRIGGER_SHIFT      15
    5.42 +#define MSI_DATA_TRIGGER_EDGE   (0 << MSI_DATA_TRIGGER_SHIFT)
     5.43 +#define MSI_DATA_TRIGGER_LEVEL  (1 << MSI_DATA_TRIGGER_SHIFT)
    5.44 +
    5.45 +#define MSI_TARGET_CPU_SHIFT        12
    5.46 +#define MSI_ADDR_HEADER         0xfee00000
    5.47 +#define MSI_ADDR_DESTID_MASK        0xfff0000f
    5.48 +#define MSI_ADDR_DESTID_CPU(cpu)    ((cpu) << MSI_TARGET_CPU_SHIFT)
    5.49 +
    5.50 +#define MSI_ADDR_DESTMODE_SHIFT     2
    5.51 +#define MSI_ADDR_DESTMODE_PHYS  (0 << MSI_ADDR_DESTMODE_SHIFT)
    5.52 +#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
    5.53 +
    5.54 +#define MSI_ADDR_REDIRECTION_SHIFT  3
    5.55 +#define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
    5.56 +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
    5.57 +
    5.58  #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
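
As a worked example of these macros (the vector and CPU values are
illustrative, not taken from the patch): for vector 0x50 targeted at
physical CPU 0, the message data word is 0x4050 and the low address
word is 0xfee00000.

    /* Example composition; 0x50 and CPU 0 are arbitrary inputs. */
    u32 data = MSI_DATA_TRIGGER_EDGE |       /* 0 */
               MSI_DATA_LEVEL_ASSERT |       /* 1 << 14 = 0x4000 */
               MSI_DATA_DELIVERY_FIXED |     /* 0 */
               MSI_DATA_VECTOR(0x50);        /* 0x50  => data = 0x4050 */

    u32 addr_lo = MSI_ADDR_HEADER |          /* 0xfee00000 */
                  MSI_ADDR_DESTMODE_PHYS |   /* 0 */
                  MSI_ADDR_REDIRECTION_CPU | /* 0 */
                  MSI_ADDR_DESTID_CPU(0);    /* 0 << 12 => 0xfee00000 */
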
     6.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Apr 16 13:40:46 2008 +0100
     6.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Apr 16 13:43:23 2008 +0100
     6.3 @@ -49,6 +49,7 @@ int __init map_iommu_mmio_region(struct 
     6.4  void __init unmap_iommu_mmio_region(struct amd_iommu *iommu);
     6.5  void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu);
     6.6  void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
     6.7 +void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu);
     6.8  void __init enable_iommu(struct amd_iommu *iommu);
     6.9  
    6.10  /* mapping functions */