ia64/xen-unstable

changeset 18605:c2fc4d26ef18

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Fri Oct 10 12:06:46 2008 +0900 (2008-10-10)
parents af8eaa3cf782 6ab55f716ce3
children 60bd590a0438
     1.1 --- a/buildconfigs/mk.linux-2.6-common	Fri Oct 10 11:58:03 2008 +0900
     1.2 +++ b/buildconfigs/mk.linux-2.6-common	Fri Oct 10 12:06:46 2008 +0900
     1.3 @@ -75,7 +75,9 @@ endif
     1.4  	# tree. Finally attempt to use make defconfig.
     1.5  	set -e ; \
     1.6  	CONFIG_VERSION=$$(sed -ne 's/$$(XENGUEST)//; s/^EXTRAVERSION = //p' $(LINUX_SRCDIR)/Makefile); \
     1.7 -	if [ -r $(DESTDIR)/boot/config-$(LINUX_VER3)$$CONFIG_VERSION$(EXTRAVERSION) ] ; then \
     1.8 +	if [ ! -z "$(XEN_LINUX_CONFIG)" -a -r $(XEN_LINUX_CONFIG) ]; then \
     1.9 +	  cp $(XEN_LINUX_CONFIG) $(CONFIG_FILE); \
    1.10 +	elif [ -r $(DESTDIR)/boot/config-$(LINUX_VER3)$$CONFIG_VERSION$(EXTRAVERSION) ] ; then \
    1.11  	  cp $(DESTDIR)/boot/config-$(LINUX_VER3)$$CONFIG_VERSION$(EXTRAVERSION) $(CONFIG_FILE) ; \
    1.12          elif [ -e $(LINUX_SRCDIR)/buildconfigs/create_config.sh ] ; then \
    1.13  	  cd $(LINUX_SRCDIR) && sh buildconfigs/create_config.sh \
     2.1 --- a/tools/firmware/hvmloader/smbios.c	Fri Oct 10 11:58:03 2008 +0900
     2.2 +++ b/tools/firmware/hvmloader/smbios.c	Fri Oct 10 12:06:46 2008 +0900
     2.3 @@ -54,13 +54,13 @@ static void *
     2.4  smbios_type_4_init(void *start, unsigned int cpu_number,
     2.5                     char *cpu_manufacturer);
     2.6  static void *
     2.7 -smbios_type_16_init(void *start, uint32_t memory_size_mb);
     2.8 +smbios_type_16_init(void *start, uint32_t memory_size_mb, int nr_mem_devs);
     2.9  static void *
    2.10 -smbios_type_17_init(void *start, uint32_t memory_size_mb);
    2.11 +smbios_type_17_init(void *start, uint32_t memory_size_mb, int instance);
    2.12  static void *
    2.13 -smbios_type_19_init(void *start, uint32_t memory_size_mb);
    2.14 +smbios_type_19_init(void *start, uint32_t memory_size_mb, int instance);
    2.15  static void *
    2.16 -smbios_type_20_init(void *start, uint32_t memory_size_mb);
    2.17 +smbios_type_20_init(void *start, uint32_t memory_size_mb, int instance);
    2.18  static void *
    2.19  smbios_type_32_init(void *start);
    2.20  static void *
    2.21 @@ -92,6 +92,7 @@ write_smbios_tables(void *start,
    2.22      unsigned cpu_num, nr_structs = 0, max_struct_size = 0;
    2.23      char *p, *q;
    2.24      char cpu_manufacturer[15];
    2.25 +    int i, nr_mem_devs;
    2.26  
    2.27      get_cpu_manufacturer(cpu_manufacturer, 15);
    2.28  
    2.29 @@ -111,10 +112,19 @@ write_smbios_tables(void *start,
    2.30      do_struct(smbios_type_3_init(p));
    2.31      for ( cpu_num = 1; cpu_num <= vcpus; cpu_num++ )
    2.32          do_struct(smbios_type_4_init(p, cpu_num, cpu_manufacturer));
    2.33 -    do_struct(smbios_type_16_init(p, memsize));
    2.34 -    do_struct(smbios_type_17_init(p, memsize));
    2.35 -    do_struct(smbios_type_19_init(p, memsize));
    2.36 -    do_struct(smbios_type_20_init(p, memsize));
    2.37 +
    2.38 +    /* Each 'memory device' covers up to 16GB of address space. */
    2.39 +    nr_mem_devs = (memsize + 0x3fff) >> 14;
    2.40 +    do_struct(smbios_type_16_init(p, memsize, nr_mem_devs));
    2.41 +    for ( i = 0; i < nr_mem_devs; i++ )
    2.42 +    {
    2.43 +        uint32_t dev_memsize = ((i == (nr_mem_devs - 1))
    2.44 +                                ? (memsize & 0x3fff) : 0x4000);
    2.45 +        do_struct(smbios_type_17_init(p, dev_memsize, i));
    2.46 +        do_struct(smbios_type_19_init(p, dev_memsize, i));
    2.47 +        do_struct(smbios_type_20_init(p, dev_memsize, i));
    2.48 +    }
    2.49 +
    2.50      do_struct(smbios_type_32_init(p));
    2.51      do_struct(smbios_type_127_init(p));
    2.52  
    2.53 @@ -441,7 +451,7 @@ smbios_type_4_init(
    2.54  
    2.55  /* Type 16 -- Physical Memory Array */
    2.56  static void *
    2.57 -smbios_type_16_init(void *start, uint32_t memsize)
    2.58 +smbios_type_16_init(void *start, uint32_t memsize, int nr_mem_devs)
    2.59  {
    2.60      struct smbios_type_16 *p = (struct smbios_type_16*)start;
    2.61  
    2.62 @@ -456,7 +466,7 @@ smbios_type_16_init(void *start, uint32_
    2.63      p->error_correction = 0x01; /* other */
    2.64      p->maximum_capacity = memsize * 1024;
    2.65      p->memory_error_information_handle = 0xfffe; /* none provided */
    2.66 -    p->number_of_memory_devices = 1;
    2.67 +    p->number_of_memory_devices = nr_mem_devs;
    2.68  
    2.69      start += sizeof(struct smbios_type_16);
    2.70      *((uint16_t *)start) = 0;
    2.71 @@ -465,22 +475,22 @@ smbios_type_16_init(void *start, uint32_
    2.72  
    2.73  /* Type 17 -- Memory Device */
    2.74  static void *
    2.75 -smbios_type_17_init(void *start, uint32_t memory_size_mb)
    2.76 +smbios_type_17_init(void *start, uint32_t memory_size_mb, int instance)
    2.77  {
    2.78 +    char buf[16];
    2.79      struct smbios_type_17 *p = (struct smbios_type_17 *)start;
    2.80      
    2.81      memset(p, 0, sizeof(*p));
    2.82  
    2.83      p->header.type = 17;
    2.84      p->header.length = sizeof(struct smbios_type_17);
    2.85 -    p->header.handle = 0x1100;
    2.86 +    p->header.handle = 0x1100 + instance;
    2.87  
    2.88      p->physical_memory_array_handle = 0x1000;
    2.89      p->total_width = 64;
    2.90      p->data_width = 64;
    2.91 -    /* truncate memory_size_mb to 16 bits and clear most significant
    2.92 -       bit [indicates size in MB] */
    2.93 -    p->size = (uint16_t) memory_size_mb & 0x7fff;
    2.94 +    ASSERT((memory_size_mb & ~0x7fff) == 0);
    2.95 +    p->size = memory_size_mb;
    2.96      p->form_factor = 0x09; /* DIMM */
    2.97      p->device_set = 0;
    2.98      p->device_locator_str = 1;
    2.99 @@ -489,8 +499,11 @@ smbios_type_17_init(void *start, uint32_
   2.100      p->type_detail = 0;
   2.101  
   2.102      start += sizeof(struct smbios_type_17);
   2.103 -    strcpy((char *)start, "DIMM 1");
   2.104 -    start += strlen("DIMM 1") + 1;
   2.105 +    strcpy(start, "DIMM ");
   2.106 +    start += strlen("DIMM ");
   2.107 +    itoa(buf, instance);
   2.108 +    strcpy(start, buf);
   2.109 +    start += strlen(buf) + 1;
   2.110      *((uint8_t *)start) = 0;
   2.111  
   2.112      return start+1;
   2.113 @@ -498,7 +511,7 @@ smbios_type_17_init(void *start, uint32_
   2.114  
   2.115  /* Type 19 -- Memory Array Mapped Address */
   2.116  static void *
   2.117 -smbios_type_19_init(void *start, uint32_t memory_size_mb)
   2.118 +smbios_type_19_init(void *start, uint32_t memory_size_mb, int instance)
   2.119  {
   2.120      struct smbios_type_19 *p = (struct smbios_type_19 *)start;
   2.121      
   2.122 @@ -506,10 +519,10 @@ smbios_type_19_init(void *start, uint32_
   2.123  
   2.124      p->header.type = 19;
   2.125      p->header.length = sizeof(struct smbios_type_19);
   2.126 -    p->header.handle = 0x1300;
   2.127 +    p->header.handle = 0x1300 + instance;
   2.128  
   2.129 -    p->starting_address = 0;
   2.130 -    p->ending_address = (memory_size_mb-1) * 1024;
   2.131 +    p->starting_address = instance << 24;
   2.132 +    p->ending_address = p->starting_address + (memory_size_mb << 10) - 1;
   2.133      p->memory_array_handle = 0x1000;
   2.134      p->partition_width = 1;
   2.135  
   2.136 @@ -520,7 +533,7 @@ smbios_type_19_init(void *start, uint32_
   2.137  
   2.138  /* Type 20 -- Memory Device Mapped Address */
   2.139  static void *
   2.140 -smbios_type_20_init(void *start, uint32_t memory_size_mb)
   2.141 +smbios_type_20_init(void *start, uint32_t memory_size_mb, int instance)
   2.142  {
   2.143      struct smbios_type_20 *p = (struct smbios_type_20 *)start;
   2.144  
   2.145 @@ -528,12 +541,12 @@ smbios_type_20_init(void *start, uint32_
   2.146  
   2.147      p->header.type = 20;
   2.148      p->header.length = sizeof(struct smbios_type_20);
   2.149 -    p->header.handle = 0x1400;
   2.150 +    p->header.handle = 0x1400 + instance;
   2.151  
   2.152 -    p->starting_address = 0;
   2.153 -    p->ending_address = (memory_size_mb-1)*1024;
   2.154 -    p->memory_device_handle = 0x1100;
   2.155 -    p->memory_array_mapped_address_handle = 0x1300;
   2.156 +    p->starting_address = instance << 24;
   2.157 +    p->ending_address = p->starting_address + (memory_size_mb << 10) - 1;
   2.158 +    p->memory_device_handle = 0x1100 + instance;
   2.159 +    p->memory_array_mapped_address_handle = 0x1300 + instance;
   2.160      p->partition_row_position = 1;
   2.161      p->interleave_position = 0;
   2.162      p->interleaved_data_depth = 0;
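
The loop above carves guest memory into one SMBIOS "memory device" per 16GB: memsize is in MB, so (memsize + 0x3fff) >> 14 rounds up to 16384MB units, and the Type 19/20 mapped addresses are in KB, which is why each instance starts at instance << 24. A standalone sketch of the arithmetic (the 20GB guest size is an illustrative value, not from the patch):

    /* Hedged sketch: reproduces the split done in write_smbios_tables()
     * with made-up input; not part of the changeset. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t memsize = 20480;                    /* 20GB guest, in MB */
        int nr_mem_devs = (memsize + 0x3fff) >> 14;  /* round up to 16GB units */
        for (int i = 0; i < nr_mem_devs; i++)
        {
            uint32_t dev_mb = (i == nr_mem_devs - 1) ? (memsize & 0x3fff)
                                                     : 0x4000;
            uint64_t start_kb = (uint64_t)i << 24;   /* 16GB steps, in KB */
            printf("DIMM %d: %u MB, KB range [%llu, %llu]\n", i, dev_mb,
                   (unsigned long long)start_kb,
                   (unsigned long long)(start_kb + ((uint64_t)dev_mb << 10) - 1));
        }
        return 0;
    }

For the 20GB example this yields DIMM 0 with 16384MB at KB 0..16777215 and DIMM 1 with 4096MB starting at KB 16777216, matching the per-instance handles 0x1100+i, 0x1300+i and 0x1400+i assigned above.
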
     3.1 --- a/tools/flask/policy/policy/modules/xen/xen.te	Fri Oct 10 11:58:03 2008 +0900
     3.2 +++ b/tools/flask/policy/policy/modules/xen/xen.te	Fri Oct 10 12:06:46 2008 +0900
     3.3 @@ -33,7 +33,7 @@ define(`create_domain', `
     3.4  				getvcpuinfo getaddrsize getvcpuaffinity};
     3.5  	allow $1 $2:shadow {enable};
     3.6  	allow $1 $2:mmu {map_read map_write memorymap adjust pinpage};
     3.7 -	allow $2 $2:mmu {map_read map_write pinpage};
     3.8 +	allow $2 $2:mmu {map_read map_write adjust pinpage};
     3.9  	allow $2 domio_t:mmu {map_read};
    3.10  	allow $2 $2:grant {query setup};
    3.11  	allow $1 $2:grant {map_read unmap};
    3.12 @@ -110,6 +110,9 @@ allow dom0_t evchn0-U_t:event {send};
    3.13  create_channel(domU_t, dom0_t, evchnU-0_t)
    3.14  allow domU_t evchnU-0_t:event {send};
    3.15  
    3.16 +allow dom0_t dom0_t:event {send};
    3.17 +allow dom0_t domU_t:grant {copy};
    3.18 +
    3.19  manage_domain(dom0_t, domU_t)
    3.20  
    3.21  ################################################################################
     4.1 --- a/tools/misc/xenpm.c	Fri Oct 10 11:58:03 2008 +0900
     4.2 +++ b/tools/misc/xenpm.c	Fri Oct 10 12:06:46 2008 +0900
     4.3 @@ -161,7 +161,7 @@ int main(int argc, char **argv)
     4.4                  break;
     4.5              }
     4.6  
     4.7 -            ret = xc_pm_get_pxstat(xc_fd, 0, pxstat);
     4.8 +            ret = xc_pm_get_pxstat(xc_fd, i, pxstat);
     4.9              if( ret ) {
    4.10                  fprintf(stderr, "failed to get P-states statistics information\n");
    4.11                  free(pxstat->trans_pt);
     5.1 --- a/tools/pygrub/src/LiloConf.py	Fri Oct 10 11:58:03 2008 +0900
     5.2 +++ b/tools/pygrub/src/LiloConf.py	Fri Oct 10 12:06:46 2008 +0900
     5.3 @@ -142,7 +142,7 @@ class LiloConfigFile(object):
     5.4          self.images.append(image)
     5.5  
     5.6      def _get_default(self):
     5.7 -        for i in range(0, len(self.images) - 1):
     5.8 +        for i in range(len(self.images)):
     5.9              if self.images[i].title == self._default:
    5.10                  return i
    5.11          return 0
    5.12 @@ -150,8 +150,8 @@ class LiloConfigFile(object):
    5.13          self._default = val
    5.14      default = property(_get_default, _set_default)
    5.15  
    5.16 -    commands = { "default": "self.default",
    5.17 -                 "timeout": "self.timeout",
    5.18 +    commands = { "default": "default",
    5.19 +                 "timeout": "timeout",
    5.20                   "prompt": None,
    5.21                   "relocatable": None,
    5.22                   }
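
Both hunks are small correctness fixes: Python's range(0, len(self.images) - 1) stops one short, so a default entry that happened to be the last image was never matched, and the commands table stored "self.default"/"self.timeout" where the bare attribute names were expected. A tiny C analogue of the off-by-one (hypothetical titles, for illustration only):

    #include <stdio.h>

    int main(void)
    {
        const char *titles[] = { "linux", "rescue", "old-kernel" };
        int n = sizeof(titles) / sizeof(titles[0]);
        for (int i = 0; i < n - 1; i++)   /* old loop: never sees titles[n-1] */
            printf("old: %s\n", titles[i]);
        for (int i = 0; i < n; i++)       /* fixed loop: covers every image */
            printf("new: %s\n", titles[i]);
        return 0;
    }
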
     6.1 --- a/tools/python/xen/util/xsm/flask/flask.py	Fri Oct 10 11:58:03 2008 +0900
     6.2 +++ b/tools/python/xen/util/xsm/flask/flask.py	Fri Oct 10 12:06:46 2008 +0900
     6.3 @@ -35,7 +35,10 @@ def calc_dom_ssidref_from_info(info):
     6.4      return ssidref
     6.5  
     6.6  def set_security_label(policy, label):
     6.7 -    return label
     6.8 +    if label:
     6.9 +        return label
    6.10 +    else:
    6.11 +        return ""
    6.12  
    6.13  def ssidref2security_label(ssidref):
    6.14      label = ssidref2label(ssidref)
     7.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Fri Oct 10 11:58:03 2008 +0900
     7.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Fri Oct 10 12:06:46 2008 +0900
     7.3 @@ -1303,7 +1303,7 @@ class XendDomainInfo:
     7.4          t.mkdir()
     7.5          t.set_permissions({'dom' : self.domid, 'read' : True})
     7.6          t.write('vm', self.vmpath)
     7.7 -        for i in [ 'device', 'control', 'error' ]:
     7.8 +        for i in [ 'device', 'control', 'error', 'memory' ]:
     7.9              t.mkdir(i)
    7.10              t.set_permissions(i, {'dom' : self.domid})
    7.11  
     8.1 --- a/tools/python/xen/xend/image.py	Fri Oct 10 11:58:03 2008 +0900
     8.2 +++ b/tools/python/xen/xend/image.py	Fri Oct 10 12:06:46 2008 +0900
     8.3 @@ -502,7 +502,7 @@ class ImageHandler:
     8.4          if fifo_fd >= 0:
     8.5              self._openSentinel(sentinel_path_fifo)
     8.6              os.close(fifo_fd)
     8.7 -            self.pid = self.vm._gatherDom(('image/device-model-pid', int))
     8.8 +            self.pid = self.vm.gatherDom(('image/device-model-pid', int))
     8.9              log.debug("%s device model rediscovered, pid %s sentinel fifo %s",
    8.10                      name, self.pid, sentinel_path_fifo)
    8.11              self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,())
     9.1 --- a/xen/arch/x86/domain.c	Fri Oct 10 11:58:03 2008 +0900
     9.2 +++ b/xen/arch/x86/domain.c	Fri Oct 10 12:06:46 2008 +0900
     9.3 @@ -459,6 +459,7 @@ void arch_domain_destroy(struct domain *
     9.4          hvm_domain_destroy(d);
     9.5  
     9.6      pci_release_devices(d);
     9.7 +    free_domain_pirqs(d);
     9.8      if ( !is_idle_domain(d) )
     9.9          iommu_domain_destroy(d);
    9.10  
    10.1 --- a/xen/arch/x86/hvm/svm/intr.c	Fri Oct 10 11:58:03 2008 +0900
    10.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Fri Oct 10 12:06:46 2008 +0900
    10.3 @@ -124,9 +124,11 @@ static void svm_dirq_assist(struct vcpu 
    10.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
    10.5              continue;
    10.6  
    10.7 +        spin_lock(&d->event_lock);
    10.8          if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
    10.9          {
   10.10              hvm_pci_msi_assert(d, irq);
   10.11 +            spin_unlock(&d->event_lock);
   10.12              continue;
   10.13          }
   10.14  
   10.15 @@ -137,9 +139,7 @@ static void svm_dirq_assist(struct vcpu 
   10.16              device = digl->device;
   10.17              intx = digl->intx;
   10.18              hvm_pci_intx_assert(d, device, intx);
   10.19 -            spin_lock(&hvm_irq_dpci->dirq_lock);
   10.20              hvm_irq_dpci->mirq[irq].pending++;
   10.21 -            spin_unlock(&hvm_irq_dpci->dirq_lock);
   10.22          }
   10.23  
   10.24          /*
   10.25 @@ -151,6 +151,7 @@ static void svm_dirq_assist(struct vcpu 
   10.26           */
   10.27          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
   10.28                    NOW() + PT_IRQ_TIME_OUT);
   10.29 +        spin_unlock(&d->event_lock);
   10.30      }
   10.31  }
   10.32  
    11.1 --- a/xen/arch/x86/hvm/vmsi.c	Fri Oct 10 11:58:03 2008 +0900
    11.2 +++ b/xen/arch/x86/hvm/vmsi.c	Fri Oct 10 12:06:46 2008 +0900
    11.3 @@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int p
    11.4                  "vector=%x trig_mode=%x\n",
    11.5                  dest, dest_mode, delivery_mode, vector, trig_mode);
    11.6  
    11.7 -    if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
    11.8 +    if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
    11.9      {
   11.10          gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
   11.11          return 0;
    12.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Fri Oct 10 11:58:03 2008 +0900
    12.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Fri Oct 10 12:06:46 2008 +0900
    12.3 @@ -127,11 +127,13 @@ static void vmx_dirq_assist(struct vcpu 
    12.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
    12.5              continue;
    12.6  
    12.7 -		if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
    12.8 -		{
    12.9 -			hvm_pci_msi_assert(d, irq);
   12.10 -			continue;
   12.11 -		}
   12.12 +        spin_lock(&d->event_lock);
   12.13 +        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
   12.14 +        {
   12.15 +            hvm_pci_msi_assert(d, irq);
   12.16 +            spin_unlock(&d->event_lock);
   12.17 +            continue;
   12.18 +        }
   12.19  
   12.20          stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
   12.21  
   12.22 @@ -140,9 +142,7 @@ static void vmx_dirq_assist(struct vcpu 
   12.23              device = digl->device;
   12.24              intx = digl->intx;
   12.25              hvm_pci_intx_assert(d, device, intx);
   12.26 -            spin_lock(&hvm_irq_dpci->dirq_lock);
   12.27              hvm_irq_dpci->mirq[irq].pending++;
   12.28 -            spin_unlock(&hvm_irq_dpci->dirq_lock);
   12.29          }
   12.30  
   12.31          /*
   12.32 @@ -154,6 +154,7 @@ static void vmx_dirq_assist(struct vcpu 
   12.33           */
   12.34          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
   12.35                    NOW() + PT_IRQ_TIME_OUT);
   12.36 +        spin_unlock(&d->event_lock);
   12.37      }
   12.38  }
   12.39  
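
As in the SVM variant above, vmx_dirq_assist() now holds the per-domain event_lock across the whole dispatch (flag test, assert, pending++, timer arm) instead of taking the finer dirq_lock around the pending counter alone, so the MSI fast path must drop the lock before its continue. A toy sketch of that early-exit pattern, with a pthread mutex standing in for the Xen spinlock (all values made up):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        int is_msi[4] = { 0, 1, 0, 1 };   /* pretend IRQs 1 and 3 are MSI */
        for (int irq = 0; irq < 4; irq++) {
            pthread_mutex_lock(&event_lock);
            if (is_msi[irq]) {
                printf("irq %d: MSI asserted\n", irq);
                pthread_mutex_unlock(&event_lock);  /* early exit must unlock */
                continue;
            }
            printf("irq %d: INTx asserted, timeout timer armed\n", irq);
            pthread_mutex_unlock(&event_lock);
        }
        return 0;
    }
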
    13.1 --- a/xen/arch/x86/i8259.c	Fri Oct 10 11:58:03 2008 +0900
    13.2 +++ b/xen/arch/x86/i8259.c	Fri Oct 10 12:06:46 2008 +0900
    13.3 @@ -408,6 +408,10 @@ void __init init_IRQ(void)
    13.4          irq_desc[LEGACY_VECTOR(i)].handler = &i8259A_irq_type;
    13.5      }
    13.6  
    13.7 +    /* Never allocate the hypercall vector or Linux/BSD fast-trap vector. */
    13.8 +    vector_irq[HYPERCALL_VECTOR] = NEVER_ASSIGN;
    13.9 +    vector_irq[0x80] = NEVER_ASSIGN;
   13.10 +
   13.11      apic_intr_init();
   13.12  
   13.13      /* Set the clock to HZ Hz */
    14.1 --- a/xen/arch/x86/io_apic.c	Fri Oct 10 11:58:03 2008 +0900
    14.2 +++ b/xen/arch/x86/io_apic.c	Fri Oct 10 12:06:46 2008 +0900
    14.3 @@ -87,7 +87,8 @@ static struct irq_pin_list {
    14.4  } irq_2_pin[PIN_MAP_SIZE];
    14.5  static int irq_2_pin_free_entry = NR_IRQS;
    14.6  
    14.7 -int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
    14.8 +int vector_irq[NR_VECTORS] __read_mostly = {
    14.9 +    [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN};
   14.10  
   14.11  /*
   14.12   * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
   14.13 @@ -666,40 +667,47 @@ static inline int IO_APIC_irq_trigger(in
   14.14  /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
   14.15  u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
   14.16  
   14.17 +int free_irq_vector(int vector)
   14.18 +{
   14.19 +    int irq;
   14.20 +
   14.21 +    BUG_ON((vector > LAST_DYNAMIC_VECTOR) || (vector < FIRST_DYNAMIC_VECTOR));
   14.22 +
   14.23 +    spin_lock(&vector_lock);
   14.24 +    if ((irq = vector_irq[vector]) == AUTO_ASSIGN)
   14.25 +        vector_irq[vector] = FREE_TO_ASSIGN;
   14.26 +    spin_unlock(&vector_lock);
   14.27 +
   14.28 +    return (irq == AUTO_ASSIGN) ? 0 : -EINVAL;
   14.29 +}
   14.30 +
   14.31  int assign_irq_vector(int irq)
   14.32  {
   14.33 -    static unsigned current_vector = FIRST_DYNAMIC_VECTOR, offset = 0;
   14.34 +    static unsigned current_vector = FIRST_DYNAMIC_VECTOR;
   14.35      unsigned vector;
   14.36  
   14.37      BUG_ON(irq >= NR_IRQ_VECTORS);
   14.38 +
   14.39      spin_lock(&vector_lock);
   14.40  
   14.41 -    if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
   14.42 +    if ((irq != AUTO_ASSIGN) && (IO_APIC_VECTOR(irq) > 0)) {
   14.43          spin_unlock(&vector_lock);
   14.44          return IO_APIC_VECTOR(irq);
   14.45      }
   14.46  
   14.47 -next:
   14.48 -    current_vector += 8;
   14.49 -
   14.50 -    /* Skip the hypercall vector. */
   14.51 -    if (current_vector == HYPERCALL_VECTOR)
   14.52 -        goto next;
   14.53 +    vector = current_vector;
   14.54 +    while (vector_irq[vector] != FREE_TO_ASSIGN) {
   14.55 +        vector += 8;
   14.56 +        if (vector > LAST_DYNAMIC_VECTOR)
   14.57 +            vector = FIRST_DYNAMIC_VECTOR + ((vector + 1) & 7);
   14.58  
   14.59 -    /* Skip the Linux/BSD fast-trap vector. */
   14.60 -    if (current_vector == 0x80)
   14.61 -        goto next;
   14.62 -
   14.63 -    if (current_vector > LAST_DYNAMIC_VECTOR) {
   14.64 -        offset++;
   14.65 -        if (!(offset%8)) {
   14.66 +        if (vector == current_vector) {
   14.67              spin_unlock(&vector_lock);
   14.68              return -ENOSPC;
   14.69          }
   14.70 -        current_vector = FIRST_DYNAMIC_VECTOR + offset;
   14.71      }
   14.72  
   14.73 -    vector = current_vector;
   14.74 +    current_vector = vector;
   14.75      vector_irq[vector] = irq;
   14.76      if (irq != AUTO_ASSIGN)
   14.77          IO_APIC_VECTOR(irq) = vector;
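
assign_irq_vector() now scans a vector_irq[] table initialized to FREE_TO_ASSIGN instead of special-casing vectors to skip: i8259.c above marks the hypercall vector and 0x80 as NEVER_ASSIGN once, and the scan simply passes over anything not free, stepping by 8 to spread allocations across the vector space and bumping to the next offset within the stride after each wrap; returning to the starting vector means the space is exhausted. A runnable sketch of the scan with deliberately tiny, made-up constants (Xen's real values differ):

    #include <stdio.h>

    #define FIRST_DYNAMIC_VECTOR 0x20    /* illustrative, not Xen's values */
    #define LAST_DYNAMIC_VECTOR  0x2f
    #define FREE_TO_ASSIGN       (-3)

    static int vector_irq[256];

    static int scan(unsigned current_vector)
    {
        unsigned vector = current_vector;
        while (vector_irq[vector] != FREE_TO_ASSIGN) {
            vector += 8;
            if (vector > LAST_DYNAMIC_VECTOR)
                vector = FIRST_DYNAMIC_VECTOR + ((vector + 1) & 7);
            if (vector == current_vector)
                return -1;               /* -ENOSPC in the hypervisor */
        }
        return vector;
    }

    int main(void)
    {
        for (int v = FIRST_DYNAMIC_VECTOR; v <= LAST_DYNAMIC_VECTOR; v++)
            vector_irq[v] = FREE_TO_ASSIGN;
        vector_irq[0x28] = 5;                 /* pretend 0x28 is taken */
        printf("-> 0x%x\n", scan(0x20));      /* 0x20 itself is free */
        vector_irq[0x20] = 6;
        printf("-> 0x%x\n", scan(0x20));      /* skips 0x28, wraps to 0x21 */
        return 0;
    }
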
    15.1 --- a/xen/arch/x86/irq.c	Fri Oct 10 11:58:03 2008 +0900
    15.2 +++ b/xen/arch/x86/irq.c	Fri Oct 10 12:06:46 2008 +0900
    15.3 @@ -14,8 +14,11 @@
    15.4  #include <xen/sched.h>
    15.5  #include <xen/keyhandler.h>
    15.6  #include <xen/compat.h>
    15.7 +#include <xen/iocap.h>
    15.8 +#include <xen/iommu.h>
    15.9 +#include <asm/msi.h>
   15.10  #include <asm/current.h>
   15.11 -#include <xen/iommu.h>
   15.12 +#include <public/physdev.h>
   15.13  
   15.14  /* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
   15.15  int opt_noirqbalance = 0;
   15.16 @@ -282,7 +285,7 @@ static void __do_IRQ_guest(int vector)
   15.17   * The descriptor is returned locked. This function is safe against changes
   15.18   * to the per-domain irq-to-vector mapping.
   15.19   */
   15.20 -static irq_desc_t *domain_spin_lock_irq_desc(
   15.21 +irq_desc_t *domain_spin_lock_irq_desc(
   15.22      struct domain *d, int irq, unsigned long *pflags)
   15.23  {
   15.24      unsigned int vector;
   15.25 @@ -511,7 +514,7 @@ int pirq_guest_bind(struct vcpu *v, int 
   15.26      int                 rc = 0;
   15.27      cpumask_t           cpumask = CPU_MASK_NONE;
   15.28  
   15.29 -    WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
   15.30 +    WARN_ON(!spin_is_locked(&v->domain->event_lock));
   15.31      BUG_ON(!local_irq_is_enabled());
   15.32  
   15.33   retry:
   15.34 @@ -681,7 +684,7 @@ void pirq_guest_unbind(struct domain *d,
   15.35      irq_desc_t *desc;
   15.36      int vector;
   15.37  
   15.38 -    WARN_ON(!spin_is_locked(&d->evtchn_lock));
   15.39 +    WARN_ON(!spin_is_locked(&d->event_lock));
   15.40  
   15.41      BUG_ON(!local_irq_is_enabled());
   15.42      desc = domain_spin_lock_irq_desc(d, irq, NULL);
   15.43 @@ -708,7 +711,7 @@ int pirq_guest_force_unbind(struct domai
   15.44      irq_guest_action_t *action;
   15.45      int i, bound = 0;
   15.46  
   15.47 -    WARN_ON(!spin_is_locked(&d->evtchn_lock));
   15.48 +    WARN_ON(!spin_is_locked(&d->event_lock));
   15.49  
   15.50      BUG_ON(!local_irq_is_enabled());
   15.51      desc = domain_spin_lock_irq_desc(d, irq, NULL);
   15.52 @@ -731,6 +734,173 @@ int pirq_guest_force_unbind(struct domai
   15.53      return bound;
   15.54  }
   15.55  
   15.56 +int get_free_pirq(struct domain *d, int type, int index)
   15.57 +{
   15.58 +    int i;
   15.59 +
   15.60 +    ASSERT(spin_is_locked(&d->event_lock));
   15.61 +
   15.62 +    if ( type == MAP_PIRQ_TYPE_GSI )
   15.63 +    {
   15.64 +        for ( i = 16; i < NR_PIRQS; i++ )
   15.65 +            if ( !d->arch.pirq_vector[i] )
   15.66 +                break;
   15.67 +        if ( i == NR_PIRQS )
   15.68 +            return -ENOSPC;
   15.69 +    }
   15.70 +    else
   15.71 +    {
   15.72 +        for ( i = NR_PIRQS - 1; i >= 16; i-- )
   15.73 +            if ( !d->arch.pirq_vector[i] )
   15.74 +                break;
   15.75 +        if ( i == 16 )
   15.76 +            return -ENOSPC;
   15.77 +    }
   15.78 +
   15.79 +    return i;
   15.80 +}
   15.81 +
   15.82 +int map_domain_pirq(
   15.83 +    struct domain *d, int pirq, int vector, int type, void *data)
   15.84 +{
   15.85 +    int ret = 0;
   15.86 +    int old_vector, old_pirq;
   15.87 +    irq_desc_t *desc;
   15.88 +    unsigned long flags;
   15.89 +
   15.90 +    ASSERT(spin_is_locked(&d->event_lock));
   15.91 +
   15.92 +    if ( !IS_PRIV(current->domain) )
   15.93 +        return -EPERM;
   15.94 +
   15.95 +    if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
   15.96 +    {
   15.97 +        dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
   15.98 +                d->domain_id, pirq, vector);
   15.99 +        return -EINVAL;
  15.100 +    }
  15.101 +
  15.102 +    old_vector = d->arch.pirq_vector[pirq];
  15.103 +    old_pirq = d->arch.vector_pirq[vector];
  15.104 +
  15.105 +    if ( (old_vector && (old_vector != vector) ) ||
  15.106 +         (old_pirq && (old_pirq != pirq)) )
  15.107 +    {
  15.108 +        dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
  15.109 +                d->domain_id, pirq, vector);
  15.110 +        return -EINVAL;
  15.111 +    }
  15.112 +
  15.113 +    ret = irq_permit_access(d, pirq);
  15.114 +    if ( ret )
  15.115 +    {
  15.116 +        dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
  15.117 +                d->domain_id, pirq);
  15.118 +        return ret;
  15.119 +    }
  15.120 +
  15.121 +    desc = &irq_desc[vector];
  15.122 +    spin_lock_irqsave(&desc->lock, flags);
  15.123 +
  15.124 +    if ( type == MAP_PIRQ_TYPE_MSI )
  15.125 +    {
  15.126 +        struct msi_info *msi = (struct msi_info *)data;
  15.127 +        if ( desc->handler != &no_irq_type )
  15.128 +            dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
  15.129 +                    d->domain_id, vector);
  15.130 +        desc->handler = &pci_msi_type;
  15.131 +        ret = pci_enable_msi(msi);
  15.132 +        if ( ret )
  15.133 +            goto done;
  15.134 +    }
  15.135 +
  15.136 +    d->arch.pirq_vector[pirq] = vector;
  15.137 +    d->arch.vector_pirq[vector] = pirq;
  15.138 +
  15.139 +done:
  15.140 +    spin_unlock_irqrestore(&desc->lock, flags);
  15.141 +    return ret;
  15.142 +}
  15.143 +
  15.144 +/* The pirq should have been unbound before this call. */
  15.145 +int unmap_domain_pirq(struct domain *d, int pirq)
  15.146 +{
  15.147 +    unsigned long flags;
  15.148 +    irq_desc_t *desc;
  15.149 +    int vector, ret = 0;
  15.150 +    bool_t forced_unbind;
  15.151 +
  15.152 +    if ( (pirq < 0) || (pirq >= NR_PIRQS) )
  15.153 +        return -EINVAL;
  15.154 +
  15.155 +    if ( !IS_PRIV(current->domain) )
  15.156 +        return -EINVAL;
  15.157 +
  15.158 +    ASSERT(spin_is_locked(&d->event_lock));
  15.159 +
  15.160 +    vector = d->arch.pirq_vector[pirq];
  15.161 +    if ( vector <= 0 )
  15.162 +    {
  15.163 +        dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
  15.164 +                d->domain_id, pirq);
  15.165 +        ret = -EINVAL;
  15.166 +        goto done;
  15.167 +    }
  15.168 +
  15.169 +    forced_unbind = pirq_guest_force_unbind(d, pirq);
  15.170 +    if ( forced_unbind )
  15.171 +        dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
  15.172 +                d->domain_id, pirq);
  15.173 +
  15.174 +    desc = &irq_desc[vector];
  15.175 +    spin_lock_irqsave(&desc->lock, flags);
  15.176 +
  15.177 +    BUG_ON(vector != d->arch.pirq_vector[pirq]);
  15.178 +
  15.179 +    if ( desc->msi_desc )
  15.180 +        pci_disable_msi(vector);
  15.181 +
  15.182 +    if ( desc->handler == &pci_msi_type )
  15.183 +    {
  15.184 +        desc->handler = &no_irq_type;
  15.185 +        free_irq_vector(vector);
  15.186 +    }
  15.187 +
  15.188 +    if ( !forced_unbind )
  15.189 +    {
  15.190 +        d->arch.pirq_vector[pirq] = 0;
  15.191 +        d->arch.vector_pirq[vector] = 0;
  15.192 +    }
  15.193 +    else
  15.194 +    {
  15.195 +        d->arch.pirq_vector[pirq] = -vector;
  15.196 +        d->arch.vector_pirq[vector] = -pirq;
  15.197 +    }
  15.198 +
  15.199 +    spin_unlock_irqrestore(&desc->lock, flags);
  15.200 +
  15.201 +    ret = irq_deny_access(d, pirq);
  15.202 +    if ( ret )
  15.203 +        dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
  15.204 +                d->domain_id, pirq);
  15.205 +
  15.206 + done:
  15.207 +    return ret;
  15.208 +}
  15.209 +
  15.210 +void free_domain_pirqs(struct domain *d)
  15.211 +{
  15.212 +    int i;
  15.213 +
  15.214 +    spin_lock(&d->event_lock);
  15.215 +
  15.216 +    for ( i = 0; i < NR_PIRQS; i++ )
  15.217 +        if ( d->arch.pirq_vector[i] > 0 )
  15.218 +            unmap_domain_pirq(d, i);
  15.219 +
  15.220 +    spin_unlock(&d->event_lock);
  15.221 +}
  15.222 +
  15.223  extern void dump_ioapic_irq_info(void);
  15.224  
  15.225  static void dump_irqs(unsigned char key)
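
get_free_pirq(), map_domain_pirq() and unmap_domain_pirq() move here from physdev.c (with the MSI details passed as an opaque struct msi_info pointer) so that free_domain_pirqs(), now called from arch_domain_destroy(), can reuse them; all three expect the caller to hold d->event_lock. On a forced unbind the pirq<->vector entries are negated rather than cleared, so later teardown can still locate the stale association. A toy model of that bookkeeping (hypothetical sizes and values):

    #include <stdio.h>

    #define NR_PIRQS   8    /* toy sizes, not Xen's */
    #define NR_VECTORS 8

    static int pirq_vector[NR_PIRQS];
    static int vector_pirq[NR_VECTORS];

    /* Mirrors the tail of unmap_domain_pirq(): a forced unbind keeps the
     * association, negated, instead of zeroing it. */
    static void toy_unmap(int pirq, int forced_unbind)
    {
        int vector = pirq_vector[pirq];
        if (!forced_unbind) {
            pirq_vector[pirq] = 0;
            vector_pirq[vector] = 0;
        } else {
            pirq_vector[pirq] = -vector;
            vector_pirq[vector] = -pirq;
        }
    }

    int main(void)
    {
        pirq_vector[3] = 5;
        vector_pirq[5] = 3;
        toy_unmap(3, 1);
        printf("forced unbind: pirq 3 -> %d, vector 5 -> %d\n",
               pirq_vector[3], vector_pirq[5]);
        return 0;
    }
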
    16.1 --- a/xen/arch/x86/mm.c	Fri Oct 10 11:58:03 2008 +0900
    16.2 +++ b/xen/arch/x86/mm.c	Fri Oct 10 12:06:46 2008 +0900
    16.3 @@ -3067,7 +3067,7 @@ static int destroy_grant_pte_mapping(
    16.4      ol1e = *(l1_pgentry_t *)va;
    16.5      
    16.6      /* Check that the virtual address supplied is actually mapped to frame. */
    16.7 -    if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
    16.8 +    if ( unlikely(l1e_get_pfn(ol1e) != frame) )
    16.9      {
   16.10          page_unlock(page);
   16.11          MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
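
The one-line change is not just cosmetic: shifting the raw PTE right by PAGE_SHIFT keeps high control bits (notably bit 63, NX) in the result, so the frame comparison could spuriously fail for a matching mapping, whereas l1e_get_pfn() masks the entry down to its address field first. A toy illustration (the 52-bit address field is an assumption for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define ADDR_MASK  ((1ULL << 52) - 1)   /* assumed physical-address field */

    int main(void)
    {
        /* A PTE for frame 0x1234 with NX (bit 63) and low flag bits set. */
        uint64_t pte   = (0x1234ULL << PAGE_SHIFT) | (1ULL << 63) | 0x27;
        uint64_t frame = 0x1234;
        printf("raw shift mismatches: %d\n", (pte >> PAGE_SHIFT) != frame);
        printf("masked pfn mismatches: %d\n",
               ((pte & ADDR_MASK) >> PAGE_SHIFT) != frame);
        return 0;
    }
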
    17.1 --- a/xen/arch/x86/pci.c	Fri Oct 10 11:58:03 2008 +0900
    17.2 +++ b/xen/arch/x86/pci.c	Fri Oct 10 12:06:46 2008 +0900
    17.3 @@ -1,12 +1,9 @@
    17.4  /******************************************************************************
    17.5   * pci.c
    17.6   * 
    17.7 - * PCI access functions.
    17.8 + * Architecture-dependent PCI access functions.
    17.9   */
   17.10  
   17.11 -#include <xen/config.h>
   17.12 -#include <xen/pci.h>
   17.13 -#include <xen/pci_regs.h>
   17.14  #include <xen/spinlock.h>
   17.15  #include <asm/io.h>
   17.16  
   17.17 @@ -118,59 +115,3 @@ void pci_conf_write32(
   17.18      pci_conf_write(PCI_CONF_ADDRESS(bus, dev, func, reg), 0, 4, data);
   17.19  }
   17.20  
   17.21 -int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap)
   17.22 -{
   17.23 -    u8 id;
   17.24 -    int max_cap = 48;
   17.25 -    u8 pos = PCI_CAPABILITY_LIST;
   17.26 -    u16 status;
   17.27 -
   17.28 -    status = pci_conf_read16(bus, dev, func, PCI_STATUS);
   17.29 -    if ( (status & PCI_STATUS_CAP_LIST) == 0 )
   17.30 -        return 0;
   17.31 -
   17.32 -    while ( max_cap-- )
   17.33 -    {
   17.34 -        pos = pci_conf_read8(bus, dev, func, pos);
   17.35 -        if ( pos < 0x40 )
   17.36 -            break;
   17.37 -
   17.38 -        pos &= ~3;
   17.39 -        id = pci_conf_read8(bus, dev, func, pos + PCI_CAP_LIST_ID);
   17.40 -
   17.41 -        if ( id == 0xff )
   17.42 -            break;
   17.43 -        else if ( id == cap )
   17.44 -            return pos;
   17.45 -
   17.46 -        pos += PCI_CAP_LIST_NEXT;
   17.47 -    }
   17.48 -
   17.49 -    return 0;
   17.50 -}
   17.51 -
   17.52 -int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap)
   17.53 -{
   17.54 -    u8 id;
   17.55 -    int ttl = 48;
   17.56 -
   17.57 -    while ( ttl-- )
   17.58 -    {
   17.59 -        pos = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
   17.60 -        if ( pos < 0x40 )
   17.61 -            break;
   17.62 -
   17.63 -        pos &= ~3;
   17.64 -        id = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   17.65 -                            pos + PCI_CAP_LIST_ID);
   17.66 -
   17.67 -        if ( id == 0xff )
   17.68 -            break;
   17.69 -        if ( id == cap )
   17.70 -            return pos;
   17.71 -
   17.72 -        pos += PCI_CAP_LIST_NEXT;
   17.73 -    }
   17.74 -    return 0;
   17.75 -}
   17.76 -
    18.1 --- a/xen/arch/x86/physdev.c	Fri Oct 10 11:58:03 2008 +0900
    18.2 +++ b/xen/arch/x86/physdev.c	Fri Oct 10 12:06:46 2008 +0900
    18.3 @@ -26,168 +26,12 @@ int
    18.4  ioapic_guest_write(
    18.5      unsigned long physbase, unsigned int reg, u32 pval);
    18.6  
    18.7 -static int get_free_pirq(struct domain *d, int type, int index)
    18.8 -{
    18.9 -    int i;
   18.10 -
   18.11 -    ASSERT(spin_is_locked(&d->evtchn_lock));
   18.12 -
   18.13 -    if ( type == MAP_PIRQ_TYPE_GSI )
   18.14 -    {
   18.15 -        for ( i = 16; i < NR_PIRQS; i++ )
   18.16 -            if ( !d->arch.pirq_vector[i] )
   18.17 -                break;
   18.18 -        if ( i == NR_PIRQS )
   18.19 -            return -ENOSPC;
   18.20 -    }
   18.21 -    else
   18.22 -    {
   18.23 -        for ( i = NR_PIRQS - 1; i >= 16; i-- )
   18.24 -            if ( !d->arch.pirq_vector[i] )
   18.25 -                break;
   18.26 -        if ( i == 16 )
   18.27 -            return -ENOSPC;
   18.28 -    }
   18.29 -
   18.30 -    return i;
   18.31 -}
   18.32 -
   18.33 -static int map_domain_pirq(struct domain *d, int pirq, int vector,
   18.34 -                           struct physdev_map_pirq *map)
   18.35 -{
   18.36 -    int ret = 0;
   18.37 -    int old_vector, old_pirq;
   18.38 -    struct msi_info msi;
   18.39 -    irq_desc_t *desc;
   18.40 -    unsigned long flags;
   18.41 -
   18.42 -    ASSERT(spin_is_locked(&d->evtchn_lock));
   18.43 -
   18.44 -    if ( !IS_PRIV(current->domain) )
   18.45 -        return -EPERM;
   18.46 -
   18.47 -    if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
   18.48 -    {
   18.49 -        dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
   18.50 -                d->domain_id, pirq, vector);
   18.51 -        return -EINVAL;
   18.52 -    }
   18.53 -
   18.54 -    old_vector = d->arch.pirq_vector[pirq];
   18.55 -    old_pirq = d->arch.vector_pirq[vector];
   18.56 -
   18.57 -    if ( (old_vector && (old_vector != vector) ) ||
   18.58 -         (old_pirq && (old_pirq != pirq)) )
   18.59 -    {
   18.60 -        dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
   18.61 -                d->domain_id, pirq, vector);
   18.62 -        return -EINVAL;
   18.63 -    }
   18.64 -
   18.65 -    ret = irq_permit_access(d, pirq);
   18.66 -    if ( ret )
   18.67 -    {
   18.68 -        dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
   18.69 -                d->domain_id, pirq);
   18.70 -        return ret;
   18.71 -    }
   18.72 -
   18.73 -    desc = &irq_desc[vector];
   18.74 -    spin_lock_irqsave(&desc->lock, flags);
   18.75 -
   18.76 -    if ( map && MAP_PIRQ_TYPE_MSI == map->type )
   18.77 -    {
   18.78 -        if ( desc->handler != &no_irq_type )
   18.79 -            dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
   18.80 -                    d->domain_id, vector);
   18.81 -        desc->handler = &pci_msi_type;
   18.82 -
   18.83 -        msi.bus = map->bus;
   18.84 -        msi.devfn = map->devfn;
   18.85 -        msi.entry_nr = map->entry_nr;
   18.86 -        msi.table_base = map->table_base;
   18.87 -        msi.vector = vector;
   18.88 -
   18.89 -        ret = pci_enable_msi(&msi);
   18.90 -        if ( ret )
   18.91 -            goto done;
   18.92 -    }
   18.93 -
   18.94 -    d->arch.pirq_vector[pirq] = vector;
   18.95 -    d->arch.vector_pirq[vector] = pirq;
   18.96 -
   18.97 -done:
   18.98 -    spin_unlock_irqrestore(&desc->lock, flags);
   18.99 -    return ret;
  18.100 -}
  18.101 -
  18.102 -/* The pirq should have been unbound before this call. */
  18.103 -static int unmap_domain_pirq(struct domain *d, int pirq)
  18.104 -{
  18.105 -    unsigned long flags;
  18.106 -    irq_desc_t *desc;
  18.107 -    int vector, ret = 0;
  18.108 -    bool_t forced_unbind;
  18.109 -
  18.110 -    if ( (pirq < 0) || (pirq >= NR_PIRQS) )
  18.111 -        return -EINVAL;
  18.112 -
  18.113 -    if ( !IS_PRIV(current->domain) )
  18.114 -        return -EINVAL;
  18.115 -
  18.116 -    ASSERT(spin_is_locked(&d->evtchn_lock));
  18.117 -
  18.118 -    vector = d->arch.pirq_vector[pirq];
  18.119 -    if ( vector <= 0 )
  18.120 -    {
  18.121 -        dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
  18.122 -                d->domain_id, pirq);
  18.123 -        ret = -EINVAL;
  18.124 -        goto done;
  18.125 -    }
  18.126 -
  18.127 -    forced_unbind = pirq_guest_force_unbind(d, pirq);
  18.128 -    if ( forced_unbind )
  18.129 -        dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
  18.130 -                d->domain_id, pirq);
  18.131 -
  18.132 -    desc = &irq_desc[vector];
  18.133 -    spin_lock_irqsave(&desc->lock, flags);
  18.134 -
  18.135 -    BUG_ON(vector != d->arch.pirq_vector[pirq]);
  18.136 -
  18.137 -    if ( desc->msi_desc )
  18.138 -        pci_disable_msi(vector);
  18.139 -
  18.140 -    if ( desc->handler == &pci_msi_type )
  18.141 -        desc->handler = &no_irq_type;
  18.142 -
  18.143 -    if ( !forced_unbind )
  18.144 -    {
  18.145 -        d->arch.pirq_vector[pirq] = 0;
  18.146 -        d->arch.vector_pirq[vector] = 0;
  18.147 -    }
  18.148 -    else
  18.149 -    {
  18.150 -        d->arch.pirq_vector[pirq] = -vector;
  18.151 -        d->arch.vector_pirq[vector] = -pirq;
  18.152 -    }
  18.153 -
  18.154 -    spin_unlock_irqrestore(&desc->lock, flags);
  18.155 -
  18.156 -    ret = irq_deny_access(d, pirq);
  18.157 -    if ( ret )
  18.158 -        dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
  18.159 -                d->domain_id, pirq);
  18.160 -
  18.161 - done:
  18.162 -    return ret;
  18.163 -}
  18.164 -
  18.165  static int physdev_map_pirq(struct physdev_map_pirq *map)
  18.166  {
  18.167      struct domain *d;
  18.168      int vector, pirq, ret = 0;
  18.169 +    struct msi_info _msi;
  18.170 +    void *map_data = NULL;
  18.171  
  18.172      if ( !IS_PRIV(current->domain) )
  18.173          return -EPERM;
  18.174 @@ -206,6 +50,7 @@ static int physdev_map_pirq(struct physd
  18.175          goto free_domain;
  18.176      }
  18.177  
  18.178 +    /* Verify or get vector. */
  18.179      switch ( map->type )
  18.180      {
  18.181          case MAP_PIRQ_TYPE_GSI:
  18.182 @@ -220,31 +65,42 @@ static int physdev_map_pirq(struct physd
  18.183              if ( !vector )
  18.184              {
  18.185                  dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
  18.186 -                        d->domain_id, map->index);
  18.187 +                        d->domain_id, vector);
  18.188                  ret = -EINVAL;
  18.189                  goto free_domain;
  18.190              }
  18.191              break;
  18.192 +
  18.193          case MAP_PIRQ_TYPE_MSI:
  18.194              vector = map->index;
  18.195 -			if ( vector == -1 )
  18.196 -				vector = assign_irq_vector(AUTO_ASSIGN);
  18.197 +            if ( vector == -1 )
  18.198 +                vector = assign_irq_vector(AUTO_ASSIGN);
  18.199  
  18.200              if ( vector < 0 || vector >= NR_VECTORS )
  18.201              {
  18.202                  dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
  18.203 -                        d->domain_id, map->index);
  18.204 +                        d->domain_id, vector);
  18.205                  ret = -EINVAL;
  18.206                  goto free_domain;
  18.207              }
  18.208 +
  18.209 +            _msi.bus = map->bus;
  18.210 +            _msi.devfn = map->devfn;
  18.211 +            _msi.entry_nr = map->entry_nr;
  18.212 +            _msi.table_base = map->table_base;
  18.213 +            _msi.vector = vector;
  18.214 +            map_data = &_msi;
  18.215              break;
  18.216 +
  18.217          default:
  18.218 -            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n", d->domain_id, map->type);
  18.219 +            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
  18.220 +                    d->domain_id, map->type);
  18.221              ret = -EINVAL;
  18.222              goto free_domain;
  18.223      }
  18.224  
  18.225 -    spin_lock(&d->evtchn_lock);
  18.226 +    /* Verify or get pirq. */
  18.227 +    spin_lock(&d->event_lock);
  18.228      if ( map->pirq < 0 )
  18.229      {
  18.230          if ( d->arch.vector_pirq[vector] )
  18.231 @@ -284,13 +140,14 @@ static int physdev_map_pirq(struct physd
  18.232              pirq = map->pirq;
  18.233      }
  18.234  
  18.235 -
  18.236 -    ret = map_domain_pirq(d, pirq, vector, map);
  18.237 +    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
  18.238 +    if ( ret == 0 )
  18.239 +        map->pirq = pirq;
  18.240  
  18.241 -    if ( !ret )
  18.242 -        map->pirq = pirq;
  18.243  done:
  18.244 -    spin_unlock(&d->evtchn_lock);
  18.245 +    spin_unlock(&d->event_lock);
  18.246 +    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
  18.247 +        free_irq_vector(vector);
  18.248  free_domain:
  18.249      rcu_unlock_domain(d);
  18.250      return ret;
  18.251 @@ -312,9 +169,9 @@ static int physdev_unmap_pirq(struct phy
  18.252      if ( d == NULL )
  18.253          return -ESRCH;
  18.254  
  18.255 -    spin_lock(&d->evtchn_lock);
  18.256 +    spin_lock(&d->event_lock);
  18.257      ret = unmap_domain_pirq(d, unmap->pirq);
  18.258 -    spin_unlock(&d->evtchn_lock);
  18.259 +    spin_unlock(&d->event_lock);
  18.260  
  18.261      rcu_unlock_domain(d);
  18.262  
  18.263 @@ -441,9 +298,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
  18.264  
  18.265          irq_op.vector = assign_irq_vector(irq);
  18.266  
  18.267 -        spin_lock(&dom0->evtchn_lock);
  18.268 -        ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector, NULL);
  18.269 -        spin_unlock(&dom0->evtchn_lock);
  18.270 +        spin_lock(&dom0->event_lock);
  18.271 +        ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
  18.272 +                              MAP_PIRQ_TYPE_GSI, NULL);
  18.273 +        spin_unlock(&dom0->event_lock);
  18.274  
  18.275          if ( copy_to_guest(arg, &irq_op, 1) != 0 )
  18.276              ret = -EFAULT;
    19.1 --- a/xen/arch/x86/x86_32/xen.lds.S	Fri Oct 10 11:58:03 2008 +0900
    19.2 +++ b/xen/arch/x86/x86_32/xen.lds.S	Fri Oct 10 12:06:46 2008 +0900
    19.3 @@ -69,6 +69,7 @@ SECTIONS
    19.4    . = ALIGN(PAGE_SIZE);
    19.5    __init_end = .;
    19.6  
    19.7 +  __per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
    19.8    __per_cpu_start = .;
    19.9    .data.percpu : { *(.data.percpu) } :text
   19.10    __per_cpu_data_end = .;
    20.1 --- a/xen/arch/x86/x86_64/xen.lds.S	Fri Oct 10 11:58:03 2008 +0900
    20.2 +++ b/xen/arch/x86/x86_64/xen.lds.S	Fri Oct 10 12:06:46 2008 +0900
    20.3 @@ -67,6 +67,7 @@ SECTIONS
    20.4    . = ALIGN(PAGE_SIZE);
    20.5    __init_end = .;
    20.6  
    20.7 +  __per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
    20.8    __per_cpu_start = .;
    20.9    .data.percpu : { *(.data.percpu) } :text
   20.10    __per_cpu_data_end = .;
    21.1 --- a/xen/common/event_channel.c	Fri Oct 10 11:58:03 2008 +0900
    21.2 +++ b/xen/common/event_channel.c	Fri Oct 10 12:06:46 2008 +0900
    21.3 @@ -133,7 +133,7 @@ static long evtchn_alloc_unbound(evtchn_
    21.4      if ( rc )
    21.5          return rc;
    21.6  
    21.7 -    spin_lock(&d->evtchn_lock);
    21.8 +    spin_lock(&d->event_lock);
    21.9  
   21.10      if ( (port = get_free_port(d)) < 0 )
   21.11          ERROR_EXIT_DOM(port, d);
   21.12 @@ -150,7 +150,7 @@ static long evtchn_alloc_unbound(evtchn_
   21.13      alloc->port = port;
   21.14  
   21.15   out:
   21.16 -    spin_unlock(&d->evtchn_lock);
   21.17 +    spin_unlock(&d->event_lock);
   21.18      rcu_unlock_domain(d);
   21.19  
   21.20      return rc;
   21.21 @@ -174,14 +174,14 @@ static long evtchn_bind_interdomain(evtc
   21.22      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
   21.23      if ( ld < rd )
   21.24      {
   21.25 -        spin_lock(&ld->evtchn_lock);
   21.26 -        spin_lock(&rd->evtchn_lock);
   21.27 +        spin_lock(&ld->event_lock);
   21.28 +        spin_lock(&rd->event_lock);
   21.29      }
   21.30      else
   21.31      {
   21.32          if ( ld != rd )
   21.33 -            spin_lock(&rd->evtchn_lock);
   21.34 -        spin_lock(&ld->evtchn_lock);
   21.35 +            spin_lock(&rd->event_lock);
   21.36 +        spin_lock(&ld->event_lock);
   21.37      }
   21.38  
   21.39      if ( (lport = get_free_port(ld)) < 0 )
   21.40 @@ -216,9 +216,9 @@ static long evtchn_bind_interdomain(evtc
   21.41      bind->local_port = lport;
   21.42  
   21.43   out:
   21.44 -    spin_unlock(&ld->evtchn_lock);
   21.45 +    spin_unlock(&ld->event_lock);
   21.46      if ( ld != rd )
   21.47 -        spin_unlock(&rd->evtchn_lock);
   21.48 +        spin_unlock(&rd->event_lock);
   21.49      
   21.50      rcu_unlock_domain(rd);
   21.51  
   21.52 @@ -244,7 +244,7 @@ static long evtchn_bind_virq(evtchn_bind
   21.53           ((v = d->vcpu[vcpu]) == NULL) )
   21.54          return -ENOENT;
   21.55  
   21.56 -    spin_lock(&d->evtchn_lock);
   21.57 +    spin_lock(&d->event_lock);
   21.58  
   21.59      if ( v->virq_to_evtchn[virq] != 0 )
   21.60          ERROR_EXIT(-EEXIST);
   21.61 @@ -260,7 +260,7 @@ static long evtchn_bind_virq(evtchn_bind
   21.62      v->virq_to_evtchn[virq] = bind->port = port;
   21.63  
   21.64   out:
   21.65 -    spin_unlock(&d->evtchn_lock);
   21.66 +    spin_unlock(&d->event_lock);
   21.67  
   21.68      return rc;
   21.69  }
   21.70 @@ -277,7 +277,7 @@ static long evtchn_bind_ipi(evtchn_bind_
   21.71           (d->vcpu[vcpu] == NULL) )
   21.72          return -ENOENT;
   21.73  
   21.74 -    spin_lock(&d->evtchn_lock);
   21.75 +    spin_lock(&d->event_lock);
   21.76  
   21.77      if ( (port = get_free_port(d)) < 0 )
   21.78          ERROR_EXIT(port);
   21.79 @@ -289,7 +289,7 @@ static long evtchn_bind_ipi(evtchn_bind_
   21.80      bind->port = port;
   21.81  
   21.82   out:
   21.83 -    spin_unlock(&d->evtchn_lock);
   21.84 +    spin_unlock(&d->event_lock);
   21.85  
   21.86      return rc;
   21.87  }
   21.88 @@ -308,7 +308,7 @@ static long evtchn_bind_pirq(evtchn_bind
   21.89      if ( !irq_access_permitted(d, pirq) )
   21.90          return -EPERM;
   21.91  
   21.92 -    spin_lock(&d->evtchn_lock);
   21.93 +    spin_lock(&d->event_lock);
   21.94  
   21.95      if ( d->pirq_to_evtchn[pirq] != 0 )
   21.96          ERROR_EXIT(-EEXIST);
   21.97 @@ -333,7 +333,7 @@ static long evtchn_bind_pirq(evtchn_bind
   21.98      bind->port = port;
   21.99  
  21.100   out:
  21.101 -    spin_unlock(&d->evtchn_lock);
  21.102 +    spin_unlock(&d->event_lock);
  21.103  
  21.104      return rc;
  21.105  }
  21.106 @@ -348,7 +348,7 @@ static long __evtchn_close(struct domain
  21.107      long           rc = 0;
  21.108  
  21.109   again:
  21.110 -    spin_lock(&d1->evtchn_lock);
  21.111 +    spin_lock(&d1->event_lock);
  21.112  
  21.113      if ( !port_is_valid(d1, port1) )
  21.114      {
  21.115 @@ -404,12 +404,12 @@ static long __evtchn_close(struct domain
  21.116  
  21.117              if ( d1 < d2 )
  21.118              {
  21.119 -                spin_lock(&d2->evtchn_lock);
  21.120 +                spin_lock(&d2->event_lock);
  21.121              }
  21.122              else if ( d1 != d2 )
  21.123              {
  21.124 -                spin_unlock(&d1->evtchn_lock);
  21.125 -                spin_lock(&d2->evtchn_lock);
  21.126 +                spin_unlock(&d1->event_lock);
  21.127 +                spin_lock(&d2->event_lock);
  21.128                  goto again;
  21.129              }
  21.130          }
  21.131 @@ -454,11 +454,11 @@ static long __evtchn_close(struct domain
  21.132      if ( d2 != NULL )
  21.133      {
  21.134          if ( d1 != d2 )
  21.135 -            spin_unlock(&d2->evtchn_lock);
  21.136 +            spin_unlock(&d2->event_lock);
  21.137          put_domain(d2);
  21.138      }
  21.139  
  21.140 -    spin_unlock(&d1->evtchn_lock);
  21.141 +    spin_unlock(&d1->event_lock);
  21.142  
  21.143      return rc;
  21.144  }
  21.145 @@ -476,11 +476,11 @@ int evtchn_send(struct domain *d, unsign
  21.146      struct vcpu   *rvcpu;
  21.147      int            rport, ret = 0;
  21.148  
  21.149 -    spin_lock(&ld->evtchn_lock);
  21.150 +    spin_lock(&ld->event_lock);
  21.151  
  21.152      if ( unlikely(!port_is_valid(ld, lport)) )
  21.153      {
  21.154 -        spin_unlock(&ld->evtchn_lock);
  21.155 +        spin_unlock(&ld->event_lock);
  21.156          return -EINVAL;
  21.157      }
  21.158  
  21.159 @@ -489,7 +489,7 @@ int evtchn_send(struct domain *d, unsign
  21.160      /* Guest cannot send via a Xen-attached event channel. */
  21.161      if ( unlikely(lchn->consumer_is_xen) )
  21.162      {
  21.163 -        spin_unlock(&ld->evtchn_lock);
  21.164 +        spin_unlock(&ld->event_lock);
  21.165          return -EINVAL;
  21.166      }
  21.167  
  21.168 @@ -527,7 +527,7 @@ int evtchn_send(struct domain *d, unsign
  21.169      }
  21.170  
  21.171  out:
  21.172 -    spin_unlock(&ld->evtchn_lock);
  21.173 +    spin_unlock(&ld->event_lock);
  21.174  
  21.175      return ret;
  21.176  }
  21.177 @@ -656,7 +656,7 @@ static long evtchn_status(evtchn_status_
  21.178      if ( rc )
  21.179          return rc;
  21.180  
  21.181 -    spin_lock(&d->evtchn_lock);
  21.182 +    spin_lock(&d->event_lock);
  21.183  
  21.184      if ( !port_is_valid(d, port) )
  21.185      {
  21.186 @@ -704,7 +704,7 @@ static long evtchn_status(evtchn_status_
  21.187      status->vcpu = chn->notify_vcpu_id;
  21.188  
  21.189   out:
  21.190 -    spin_unlock(&d->evtchn_lock);
  21.191 +    spin_unlock(&d->event_lock);
  21.192      rcu_unlock_domain(d);
  21.193  
  21.194      return rc;
  21.195 @@ -720,7 +720,7 @@ long evtchn_bind_vcpu(unsigned int port,
  21.196      if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
  21.197          return -ENOENT;
  21.198  
  21.199 -    spin_lock(&d->evtchn_lock);
  21.200 +    spin_lock(&d->event_lock);
  21.201  
  21.202      if ( !port_is_valid(d, port) )
  21.203      {
  21.204 @@ -756,7 +756,7 @@ long evtchn_bind_vcpu(unsigned int port,
  21.205      }
  21.206  
  21.207   out:
  21.208 -    spin_unlock(&d->evtchn_lock);
  21.209 +    spin_unlock(&d->event_lock);
  21.210  
  21.211      return rc;
  21.212  }
  21.213 @@ -768,11 +768,11 @@ static long evtchn_unmask(evtchn_unmask_
  21.214      int            port = unmask->port;
  21.215      struct vcpu   *v;
  21.216  
  21.217 -    spin_lock(&d->evtchn_lock);
  21.218 +    spin_lock(&d->event_lock);
  21.219  
  21.220      if ( unlikely(!port_is_valid(d, port)) )
  21.221      {
  21.222 -        spin_unlock(&d->evtchn_lock);
  21.223 +        spin_unlock(&d->event_lock);
  21.224          return -EINVAL;
  21.225      }
  21.226  
  21.227 @@ -790,7 +790,7 @@ static long evtchn_unmask(evtchn_unmask_
  21.228          vcpu_mark_events_pending(v);
  21.229      }
  21.230  
  21.231 -    spin_unlock(&d->evtchn_lock);
  21.232 +    spin_unlock(&d->event_lock);
  21.233  
  21.234      return 0;
  21.235  }
  21.236 @@ -944,7 +944,7 @@ int alloc_unbound_xen_event_channel(
  21.237      struct domain *d = local_vcpu->domain;
  21.238      int            port;
  21.239  
  21.240 -    spin_lock(&d->evtchn_lock);
  21.241 +    spin_lock(&d->event_lock);
  21.242  
  21.243      if ( (port = get_free_port(d)) < 0 )
  21.244          goto out;
  21.245 @@ -956,7 +956,7 @@ int alloc_unbound_xen_event_channel(
  21.246      chn->u.unbound.remote_domid = remote_domid;
  21.247  
  21.248   out:
  21.249 -    spin_unlock(&d->evtchn_lock);
  21.250 +    spin_unlock(&d->event_lock);
  21.251  
  21.252      return port;
  21.253  }
  21.254 @@ -968,11 +968,11 @@ void free_xen_event_channel(
  21.255      struct evtchn *chn;
  21.256      struct domain *d = local_vcpu->domain;
  21.257  
  21.258 -    spin_lock(&d->evtchn_lock);
  21.259 +    spin_lock(&d->event_lock);
  21.260  
  21.261      if ( unlikely(d->is_dying) )
  21.262      {
  21.263 -        spin_unlock(&d->evtchn_lock);
  21.264 +        spin_unlock(&d->event_lock);
  21.265          return;
  21.266      }
  21.267  
  21.268 @@ -981,7 +981,7 @@ void free_xen_event_channel(
  21.269      BUG_ON(!chn->consumer_is_xen);
  21.270      chn->consumer_is_xen = 0;
  21.271  
  21.272 -    spin_unlock(&d->evtchn_lock);
  21.273 +    spin_unlock(&d->event_lock);
  21.274  
  21.275      (void)__evtchn_close(d, port);
  21.276  }
  21.277 @@ -993,7 +993,7 @@ void notify_via_xen_event_channel(int lp
  21.278      struct domain *ld = current->domain, *rd;
  21.279      int            rport;
  21.280  
  21.281 -    spin_lock(&ld->evtchn_lock);
  21.282 +    spin_lock(&ld->event_lock);
  21.283  
  21.284      ASSERT(port_is_valid(ld, lport));
  21.285      lchn = evtchn_from_port(ld, lport);
  21.286 @@ -1007,13 +1007,13 @@ void notify_via_xen_event_channel(int lp
  21.287          evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
  21.288      }
  21.289  
  21.290 -    spin_unlock(&ld->evtchn_lock);
  21.291 +    spin_unlock(&ld->event_lock);
  21.292  }
  21.293  
  21.294  
  21.295  int evtchn_init(struct domain *d)
  21.296  {
  21.297 -    spin_lock_init(&d->evtchn_lock);
  21.298 +    spin_lock_init(&d->event_lock);
  21.299      if ( get_free_port(d) != 0 )
  21.300          return -EINVAL;
  21.301      evtchn_from_port(d, 0)->state = ECS_RESERVED;
  21.302 @@ -1027,7 +1027,7 @@ void evtchn_destroy(struct domain *d)
  21.303  
  21.304      /* After this barrier no new event-channel allocations can occur. */
  21.305      BUG_ON(!d->is_dying);
  21.306 -    spin_barrier(&d->evtchn_lock);
  21.307 +    spin_barrier(&d->event_lock);
  21.308  
  21.309      /* Close all existing event channels. */
  21.310      for ( i = 0; port_is_valid(d, i); i++ )
  21.311 @@ -1037,14 +1037,14 @@ void evtchn_destroy(struct domain *d)
  21.312      }
  21.313  
  21.314      /* Free all event-channel buckets. */
  21.315 -    spin_lock(&d->evtchn_lock);
  21.316 +    spin_lock(&d->event_lock);
  21.317      for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
  21.318      {
  21.319          xsm_free_security_evtchn(d->evtchn[i]);
  21.320          xfree(d->evtchn[i]);
  21.321          d->evtchn[i] = NULL;
  21.322      }
  21.323 -    spin_unlock(&d->evtchn_lock);
  21.324 +    spin_unlock(&d->event_lock);
  21.325  }
  21.326  
  21.327  static void domain_dump_evtchn_info(struct domain *d)
  21.328 @@ -1053,7 +1053,7 @@ static void domain_dump_evtchn_info(stru
  21.329  
  21.330      printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
  21.331  
  21.332 -    if ( !spin_trylock(&d->evtchn_lock) )
  21.333 +    if ( !spin_trylock(&d->event_lock) )
  21.334          return;
  21.335  
  21.336      printk("Event channel information for domain %d:\n",
  21.337 @@ -1094,7 +1094,7 @@ static void domain_dump_evtchn_info(stru
  21.338          printk(" x=%d\n", chn->consumer_is_xen);
  21.339      }
  21.340  
  21.341 -    spin_unlock(&d->evtchn_lock);
  21.342 +    spin_unlock(&d->event_lock);
  21.343  }
  21.344  
  21.345  static void dump_evtchn_info(unsigned char key)
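
Beyond the mechanical evtchn_lock -> event_lock rename, the deadlock-avoidance rule in evtchn_bind_interdomain() and __evtchn_close() deserves a note: when two domains' event locks are needed, they are always acquired in a fixed order (the code compares the domain pointers), so two concurrent cross-domain binds cannot each hold one lock while waiting for the other. A sketch of the pattern with pthread mutexes standing in for Xen spinlocks (link with -lpthread on older toolchains):

    #include <pthread.h>
    #include <stdio.h>

    struct domain { pthread_mutex_t event_lock; int id; };

    /* Pointer-order locking, as in the hypervisor's "smaller first" rule. */
    static void lock_pair(struct domain *ld, struct domain *rd)
    {
        if (ld < rd) {
            pthread_mutex_lock(&ld->event_lock);
            pthread_mutex_lock(&rd->event_lock);
        } else {
            if (ld != rd)
                pthread_mutex_lock(&rd->event_lock);
            pthread_mutex_lock(&ld->event_lock);
        }
    }

    static void unlock_pair(struct domain *ld, struct domain *rd)
    {
        pthread_mutex_unlock(&ld->event_lock);
        if (ld != rd)
            pthread_mutex_unlock(&rd->event_lock);
    }

    int main(void)
    {
        struct domain a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct domain b = { PTHREAD_MUTEX_INITIALIZER, 1 };
        lock_pair(&a, &b);   /* same order no matter which side binds */
        printf("dom%d and dom%d locked without deadlock risk\n", a.id, b.id);
        unlock_pair(&a, &b);
        return 0;
    }
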
    22.1 --- a/xen/drivers/Makefile	Fri Oct 10 11:58:03 2008 +0900
    22.2 +++ b/xen/drivers/Makefile	Fri Oct 10 12:06:46 2008 +0900
    22.3 @@ -1,5 +1,6 @@
    22.4  subdir-y += char
    22.5  subdir-y += cpufreq
    22.6 +subdir-y += pci
    22.7  subdir-$(x86) += passthrough
    22.8  subdir-$(HAS_ACPI) += acpi
    22.9  subdir-$(HAS_VGA) += video
    23.1 --- a/xen/drivers/passthrough/io.c	Fri Oct 10 11:58:03 2008 +0900
    23.2 +++ b/xen/drivers/passthrough/io.c	Fri Oct 10 12:06:46 2008 +0900
    23.3 @@ -26,10 +26,14 @@ static void pt_irq_time_out(void *data)
    23.4      struct hvm_mirq_dpci_mapping *irq_map = data;
    23.5      unsigned int guest_gsi, machine_gsi = 0;
    23.6      int vector;
    23.7 -    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
    23.8 +    struct hvm_irq_dpci *dpci = NULL;
    23.9      struct dev_intx_gsi_link *digl;
   23.10      uint32_t device, intx;
   23.11  
   23.12 +    spin_lock(&irq_map->dom->event_lock);
   23.13 +
   23.14 +    dpci = domain_get_irq_dpci(irq_map->dom);
   23.15 +    ASSERT(dpci);
   23.16      list_for_each_entry ( digl, &irq_map->digl_list, list )
   23.17      {
   23.18          guest_gsi = digl->gsi;
   23.19 @@ -41,55 +45,65 @@ static void pt_irq_time_out(void *data)
   23.20  
   23.21      clear_bit(machine_gsi, dpci->dirq_mask);
   23.22      vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
   23.23 -    stop_timer(&dpci->hvm_timer[vector]);
   23.24 -    spin_lock(&dpci->dirq_lock);
   23.25      dpci->mirq[machine_gsi].pending = 0;
   23.26 -    spin_unlock(&dpci->dirq_lock);
   23.27 +    spin_unlock(&irq_map->dom->event_lock);
   23.28      pirq_guest_eoi(irq_map->dom, machine_gsi);
   23.29  }
   23.30  
   23.31  int pt_irq_create_bind_vtd(
   23.32      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
   23.33  {
   23.34 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
   23.35 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
   23.36      uint32_t machine_gsi, guest_gsi;
   23.37      uint32_t device, intx, link;
   23.38      struct dev_intx_gsi_link *digl;
   23.39 +    int pirq = pt_irq_bind->machine_irq;
   23.40  
   23.41 +    if ( pirq < 0 || pirq >= NR_PIRQS )
   23.42 +        return -EINVAL;
   23.43 +
   23.44 +    spin_lock(&d->event_lock);
   23.45 +
   23.46 +    hvm_irq_dpci = domain_get_irq_dpci(d);
   23.47      if ( hvm_irq_dpci == NULL )
   23.48      {
   23.49          hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
   23.50          if ( hvm_irq_dpci == NULL )
   23.51 +        {
   23.52 +            spin_unlock(&d->event_lock);
   23.53              return -ENOMEM;
   23.54 -
   23.55 +        }
   23.56          memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
   23.57 -        spin_lock_init(&hvm_irq_dpci->dirq_lock);
   23.58          for ( int i = 0; i < NR_IRQS; i++ )
   23.59              INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
   23.60 +    }
   23.61  
   23.62 -        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
   23.63 -            xfree(hvm_irq_dpci);
   23.64 +    if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
   23.65 +    {
   23.66 +        xfree(hvm_irq_dpci);
   23.67 +        spin_unlock(&d->event_lock);
   23.68 +        return -EINVAL;
   23.69      }
   23.70  
   23.71      if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
   23.72      {
   23.73 -        int pirq = pt_irq_bind->machine_irq;
   23.74  
   23.75 -        if ( pirq < 0 || pirq >= NR_IRQS )
   23.76 -            return -EINVAL;
   23.77 -
   23.78 -        if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
   23.79 +        if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping) )
   23.80          {
   23.81 -            hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
   23.82 -                                              HVM_IRQ_DPCI_MSI ;
   23.83 +            set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
   23.84 +            hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
   23.85 +            hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
   23.86 +            hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
   23.87 +            /* Bind after hvm_irq_dpci is set up, to avoid racing with the irq handler. */
   23.88              pirq_guest_bind(d->vcpu[0], pirq, 0);
   23.89          }
   23.90 +        else if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
   23.91 +                  hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq )
   23.92  
   23.93 -        hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
   23.94 -        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
   23.95 -        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
   23.96 -        hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
   23.97 -
   23.98 +        {
   23.99 +            spin_unlock(&d->event_lock);
  23.100 +            return -EBUSY;
  23.101 +        }
  23.102      }
  23.103      else
  23.104      {
  23.105 @@ -102,7 +116,10 @@ int pt_irq_create_bind_vtd(
  23.106  
  23.107          digl = xmalloc(struct dev_intx_gsi_link);
  23.108          if ( !digl )
  23.109 +        {
  23.110 +            spin_unlock(&d->event_lock);
  23.111              return -ENOMEM;
  23.112 +        }
  23.113  
  23.114          digl->device = device;
  23.115          digl->intx = intx;
  23.116 @@ -117,11 +134,11 @@ int pt_irq_create_bind_vtd(
  23.117          hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
  23.118  
  23.119          /* Bind the same mirq once in the same domain */
  23.120 -        if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
  23.121 +        if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping) )
  23.122          {
  23.123 -            hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
  23.124              hvm_irq_dpci->mirq[machine_gsi].dom = d;
  23.125  
  23.126 +            /* Init timer before binding */
  23.127              init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
  23.128                         pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
  23.129              /* Deal with gsi for legacy devices */
  23.130 @@ -132,37 +149,45 @@ int pt_irq_create_bind_vtd(
  23.131                   "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
  23.132                   machine_gsi, device, intx);
  23.133      }
  23.134 +    spin_unlock(&d->event_lock);
  23.135      return 0;
  23.136  }
  23.137  
  23.138  int pt_irq_destroy_bind_vtd(
  23.139      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
  23.140  {
  23.141 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
  23.142 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
  23.143      uint32_t machine_gsi, guest_gsi;
  23.144      uint32_t device, intx, link;
  23.145      struct list_head *digl_list, *tmp;
  23.146      struct dev_intx_gsi_link *digl;
  23.147  
  23.148 -    if ( hvm_irq_dpci == NULL )
  23.149 -        return 0;
  23.150 -
  23.151      machine_gsi = pt_irq_bind->machine_irq;
  23.152      device = pt_irq_bind->u.pci.device;
  23.153      intx = pt_irq_bind->u.pci.intx;
  23.154      guest_gsi = hvm_pci_intx_gsi(device, intx);
  23.155      link = hvm_pci_intx_link(device, intx);
  23.156 -    hvm_irq_dpci->link_cnt[link]--;
  23.157  
  23.158      gdprintk(XENLOG_INFO,
  23.159               "pt_irq_destroy_bind_vtd: machine_gsi=%d "
  23.160               "guest_gsi=%d, device=%d, intx=%d.\n",
  23.161               machine_gsi, guest_gsi, device, intx);
  23.162 +    spin_lock(&d->event_lock);
  23.163 +
  23.164 +    hvm_irq_dpci = domain_get_irq_dpci(d);
  23.165 +
  23.166 +    if ( hvm_irq_dpci == NULL )
  23.167 +    {
  23.168 +        spin_unlock(&d->event_lock);
  23.169 +        return -EINVAL;
  23.170 +    }
  23.171 +
  23.172 +    hvm_irq_dpci->link_cnt[link]--;
  23.173      memset(&hvm_irq_dpci->girq[guest_gsi], 0,
  23.174             sizeof(struct hvm_girq_dpci_mapping));
  23.175  
  23.176      /* clear the mirq info */
  23.177 -    if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
  23.178 +    if ( test_bit(machine_gsi, hvm_irq_dpci->mapping) )
  23.179      {
  23.180          list_for_each_safe ( digl_list, tmp,
  23.181                  &hvm_irq_dpci->mirq[machine_gsi].digl_list )
  23.182 @@ -185,9 +210,10 @@ int pt_irq_destroy_bind_vtd(
  23.183              kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
  23.184              hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
  23.185              hvm_irq_dpci->mirq[machine_gsi].flags = 0;
  23.186 +            clear_bit(machine_gsi, hvm_irq_dpci->mapping);
  23.187          }
  23.188      }
  23.189 -
  23.190 +    spin_unlock(&d->event_lock);
  23.191      gdprintk(XENLOG_INFO,
  23.192               "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
  23.193               machine_gsi, device, intx);
  23.194 @@ -199,8 +225,9 @@ int hvm_do_IRQ_dpci(struct domain *d, un
  23.195  {
  23.196      struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
  23.197  
  23.198 +    ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
  23.199      if ( !iommu_enabled || (d == dom0) || !dpci ||
  23.200 -         !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
  23.201 +         !test_bit(mirq, dpci->mapping) )
  23.202          return 0;
  23.203  
  23.204      /*
  23.205 @@ -218,44 +245,46 @@ int hvm_do_IRQ_dpci(struct domain *d, un
  23.206      return 1;
  23.207  }
  23.208  
  23.209 -
  23.210  void hvm_dpci_msi_eoi(struct domain *d, int vector)
  23.211  {
  23.212      struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  23.213 +    irq_desc_t *desc;
  23.214      int pirq;
  23.215 -    unsigned long flags;
  23.216 -    irq_desc_t *desc;
  23.217  
  23.218      if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
  23.219         return;
  23.220  
  23.221 +    spin_lock(&d->event_lock);
  23.222      pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
  23.223  
  23.224      if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
  23.225 -         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
  23.226 -         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
  23.227 -    {
  23.228 -        int vec;
  23.229 -        vec = domain_irq_to_vector(d, pirq);
  23.230 -        desc = &irq_desc[vec];
  23.231 +         test_bit(pirq, hvm_irq_dpci->mapping) &&
  23.232 +         test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
  23.233 +    {
  23.234 +        BUG_ON(!local_irq_is_enabled());
  23.235 +        desc = domain_spin_lock_irq_desc(d, pirq, NULL);
  23.236 +        if ( !desc )
  23.237 +        {
  23.238 +            spin_unlock(&d->event_lock);
  23.239 +            return;
  23.240 +        }
  23.241  
  23.242 -        spin_lock_irqsave(&desc->lock, flags);
  23.243 -        desc->status &= ~IRQ_INPROGRESS;
  23.244 -        spin_unlock_irqrestore(&desc->lock, flags);
  23.245 +        desc->status &= ~IRQ_INPROGRESS;
  23.246 +        spin_unlock_irq(&desc->lock);
  23.247  
  23.248 -        pirq_guest_eoi(d, pirq);
  23.249 -    }
  23.250 +        pirq_guest_eoi(d, pirq);
  23.251 +    }
  23.252 +
  23.253 +    spin_unlock(&d->event_lock);
  23.254  }
  23.255  
  23.256  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
  23.257                    union vioapic_redir_entry *ent)
  23.258  {
  23.259 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
  23.260 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
  23.261      uint32_t device, intx, machine_gsi;
  23.262  
  23.263 -    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
  23.264 -         (guest_gsi >= NR_ISAIRQS &&
  23.265 -          !hvm_irq_dpci->girq[guest_gsi].valid) )
  23.266 +    if ( !iommu_enabled )
  23.267          return;
  23.268  
  23.269      if ( guest_gsi < NR_ISAIRQS )
  23.270 @@ -264,23 +293,34 @@ void hvm_dpci_eoi(struct domain *d, unsi
  23.271          return;
  23.272      }
  23.273  
  23.274 -    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
  23.275 +    spin_lock(&d->event_lock);
  23.276 +    hvm_irq_dpci = domain_get_irq_dpci(d);
  23.277 +
  23.278 +    if ( (hvm_irq_dpci == NULL) ||
  23.279 +         (guest_gsi >= NR_ISAIRQS &&
  23.280 +          !hvm_irq_dpci->girq[guest_gsi].valid) )
  23.281 +    {
  23.282 +        spin_unlock(&d->event_lock);
  23.283 +        return;
  23.284 +    }
  23.285 +
  23.286      device = hvm_irq_dpci->girq[guest_gsi].device;
  23.287      intx = hvm_irq_dpci->girq[guest_gsi].intx;
  23.288      hvm_pci_intx_deassert(d, device, intx);
  23.289  
  23.290 -    spin_lock(&hvm_irq_dpci->dirq_lock);
  23.291 +    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
  23.292      if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
  23.293      {
  23.294 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  23.295 -
  23.296          if ( (ent == NULL) || !ent->fields.mask )
  23.297          {
  23.298 +            /*
  23.299 +             * No need to take the vector lock for the timer,
  23.300 +             * since the interrupt has not been EOIed yet.
  23.301 +             */
  23.302              stop_timer(&hvm_irq_dpci->hvm_timer[
  23.303                  domain_irq_to_vector(d, machine_gsi)]);
  23.304              pirq_guest_eoi(d, machine_gsi);
  23.305          }
  23.306      }
  23.307 -    else
  23.308 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  23.309 +    spin_unlock(&d->event_lock);
  23.310  }
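
The io.c hunks above all follow one locking discipline: take d->event_lock,
then look up the domain's dpci state, and drop the lock on every exit path.
A minimal sketch of that pattern, assuming only domain_get_irq_dpci() and
the fields shown above; the function name and the "operate on dpci" step
are illustrative placeholders, not part of the tree:

    /* Sketch only: dpci state is looked up and used under event_lock. */
    static int dpci_op_sketch(struct domain *d)
    {
        struct hvm_irq_dpci *dpci;

        spin_lock(&d->event_lock);
        dpci = domain_get_irq_dpci(d);  /* only stable while locked */
        if ( dpci == NULL )
        {
            spin_unlock(&d->event_lock);
            return -EINVAL;
        }
        /* ... read or update dpci->mirq[], dpci->mapping, ... */
        spin_unlock(&d->event_lock);
        return 0;
    }
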
    24.1 --- a/xen/drivers/passthrough/pci.c	Fri Oct 10 11:58:03 2008 +0900
    24.2 +++ b/xen/drivers/passthrough/pci.c	Fri Oct 10 12:06:46 2008 +0900
    24.3 @@ -154,7 +154,7 @@ int pci_remove_device(u8 bus, u8 devfn)
    24.4  
    24.5  static void pci_clean_dpci_irqs(struct domain *d)
    24.6  {
    24.7 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    24.8 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    24.9      uint32_t i;
   24.10      struct list_head *digl_list, *tmp;
   24.11      struct dev_intx_gsi_link *digl;
   24.12 @@ -165,13 +165,14 @@ static void pci_clean_dpci_irqs(struct d
   24.13      if ( !is_hvm_domain(d) && !need_iommu(d) )
   24.14          return;
   24.15  
   24.16 +    spin_lock(&d->event_lock);
   24.17 +    hvm_irq_dpci = domain_get_irq_dpci(d);
   24.18      if ( hvm_irq_dpci != NULL )
   24.19      {
   24.20 -        for ( i = 0; i < NR_IRQS; i++ )
   24.21 +        for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
   24.22 +              i < NR_PIRQS;
   24.23 +              i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
   24.24          {
   24.25 -            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
   24.26 -                continue;
   24.27 -
   24.28              pirq_guest_unbind(d, i);
   24.29              kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
   24.30  
   24.31 @@ -188,6 +189,7 @@ static void pci_clean_dpci_irqs(struct d
   24.32          d->arch.hvm_domain.irq.dpci = NULL;
   24.33          xfree(hvm_irq_dpci);
   24.34      }
   24.35 +    spin_unlock(&d->event_lock);
   24.36  }
   24.37  
   24.38  void pci_release_devices(struct domain *d)
    25.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Fri Oct 10 11:58:03 2008 +0900
    25.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Fri Oct 10 12:06:46 2008 +0900
    25.3 @@ -44,7 +44,7 @@ u16 apicid_to_bdf(int apic_id)
    25.4  }
    25.5  
    25.6  static int remap_entry_to_ioapic_rte(
    25.7 -    struct iommu *iommu, struct IO_APIC_route_entry *old_rte)
    25.8 +    struct iommu *iommu, struct IO_xAPIC_route_entry *old_rte)
    25.9  {
   25.10      struct iremap_entry *iremap_entry = NULL, *iremap_entries;
   25.11      struct IO_APIC_route_remap_entry *remap_rte;
   25.12 @@ -90,13 +90,13 @@ static int remap_entry_to_ioapic_rte(
   25.13  }
   25.14  
   25.15  static int ioapic_rte_to_remap_entry(struct iommu *iommu,
   25.16 -    int apic_id, struct IO_APIC_route_entry *old_rte,
   25.17 +    int apic_id, struct IO_xAPIC_route_entry *old_rte,
   25.18      unsigned int rte_upper, unsigned int value)
   25.19  {
   25.20      struct iremap_entry *iremap_entry = NULL, *iremap_entries;
   25.21      struct iremap_entry new_ire;
   25.22      struct IO_APIC_route_remap_entry *remap_rte;
   25.23 -    struct IO_APIC_route_entry new_rte;
   25.24 +    struct IO_xAPIC_route_entry new_rte;
   25.25      int index;
   25.26      unsigned long flags;
   25.27      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   25.28 @@ -177,7 +177,7 @@ static int ioapic_rte_to_remap_entry(str
   25.29  unsigned int io_apic_read_remap_rte(
   25.30      unsigned int apic, unsigned int reg)
   25.31  {
   25.32 -    struct IO_APIC_route_entry old_rte = { 0 };
   25.33 +    struct IO_xAPIC_route_entry old_rte = { 0 };
   25.34      struct IO_APIC_route_remap_entry *remap_rte;
   25.35      int rte_upper = (reg & 1) ? 1 : 0;
   25.36      struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
   25.37 @@ -222,7 +222,7 @@ unsigned int io_apic_read_remap_rte(
   25.38  void io_apic_write_remap_rte(
   25.39      unsigned int apic, unsigned int reg, unsigned int value)
   25.40  {
   25.41 -    struct IO_APIC_route_entry old_rte = { 0 };
   25.42 +    struct IO_xAPIC_route_entry old_rte = { 0 };
   25.43      struct IO_APIC_route_remap_entry *remap_rte;
   25.44      unsigned int rte_upper = (reg & 1) ? 1 : 0;
   25.45      struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
    26.1 --- a/xen/drivers/passthrough/vtd/vtd.h	Fri Oct 10 11:58:03 2008 +0900
    26.2 +++ b/xen/drivers/passthrough/vtd/vtd.h	Fri Oct 10 12:06:46 2008 +0900
    26.3 @@ -23,6 +23,43 @@
    26.4  
    26.5  #include <xen/iommu.h>
    26.6  
    26.7 +/* Accommodate both IOAPIC and IOSAPIC. */
    26.8 +struct IO_xAPIC_route_entry {
    26.9 +    __u32   vector      :  8,
   26.10 +        delivery_mode   :  3,   /* 000: FIXED
   26.11 +                                 * 001: lowest prio
   26.12 +                                 * 111: ExtINT
   26.13 +                                 */
   26.14 +        dest_mode       :  1,   /* 0: physical, 1: logical */
   26.15 +        delivery_status :  1,
   26.16 +        polarity        :  1,
   26.17 +        irr             :  1,
   26.18 +        trigger         :  1,   /* 0: edge, 1: level */
   26.19 +        mask            :  1,   /* 0: enabled, 1: disabled */
   26.20 +        __reserved_2    : 15;
   26.21 +
   26.22 +    union {
   26.23 +        struct { __u32
   26.24 +            __reserved_1    : 24,
   26.25 +            physical_dest   :  4,
   26.26 +            __reserved_2    :  4;
   26.27 +        } physical;
   26.28 +
   26.29 +        struct { __u32
   26.30 +            __reserved_1    : 24,
   26.31 +            logical_dest    :  8;
   26.32 +        } logical;
   26.33 +
   26.34 +#ifdef __ia64__
   26.35 +        struct { __u32
   26.36 +            __reserved_1    : 16,
   26.37 +            dest_id         : 16;
   26.38 +        };
   26.39 +#endif
   26.40 +    } dest;
   26.41 +
   26.42 +} __attribute__ ((packed));
   26.43 +
   26.44  struct IO_APIC_route_remap_entry {
   26.45      union {
   26.46          u64 val;
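
The IO_xAPIC_route_entry above accommodates both the x86 IO-APIC RTE layout
and, under __ia64__, the IOSAPIC's 16-bit destination id.  A hedged sketch
of reading the destination field, assuming physical destination mode
(dest_mode == 0); the helper name is illustrative, not part of the tree:

    /* Sketch only: extract the physical destination from an entry. */
    static inline unsigned int xapic_entry_phys_dest(
        const struct IO_xAPIC_route_entry *e)
    {
        return e->dest.physical.physical_dest;
    }
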
    27.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Fri Oct 10 11:58:03 2008 +0900
    27.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Fri Oct 10 12:06:46 2008 +0900
    27.3 @@ -85,37 +85,41 @@ int domain_set_irq_dpci(struct domain *d
    27.4  void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
    27.5  {
    27.6      struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    27.7 -    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
    27.8 +    struct hvm_irq_dpci *dpci = NULL;
    27.9      struct dev_intx_gsi_link *digl, *tmp;
   27.10      int i;
   27.11  
   27.12      ASSERT(isairq < NR_ISAIRQS);
   27.13 -    if ( !vtd_enabled || !dpci ||
   27.14 -         !test_bit(isairq, dpci->isairq_map) )
   27.15 +    if ( !vtd_enabled )
   27.16          return;
   27.17  
   27.18 -    /* Multiple mirq may be mapped to one isa irq */
   27.19 -    for ( i = 0; i < NR_IRQS; i++ )
   27.20 +    spin_lock(&d->event_lock);
   27.21 +
   27.22 +    dpci = domain_get_irq_dpci(d);
   27.23 +
   27.24 +    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
   27.25      {
   27.26 -        if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
   27.27 -            continue;
   27.28 -
   27.29 +        spin_unlock(&d->event_lock);
   27.30 +        return;
   27.31 +    }
   27.32 +    /* Multiple mirqs may be mapped to one ISA irq */
   27.33 +    for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
   27.34 +          i < NR_PIRQS;
   27.35 +          i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
   27.36 +    {
   27.37          list_for_each_entry_safe ( digl, tmp,
   27.38              &dpci->mirq[i].digl_list, list )
   27.39          {
   27.40              if ( hvm_irq->pci_link.route[digl->link] == isairq )
   27.41              {
   27.42                  hvm_pci_intx_deassert(d, digl->device, digl->intx);
   27.43 -                spin_lock(&dpci->dirq_lock);
   27.44                  if ( --dpci->mirq[i].pending == 0 )
   27.45                  {
   27.46 -                    spin_unlock(&dpci->dirq_lock);
   27.47                      stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
   27.48                      pirq_guest_eoi(d, i);
   27.49                  }
   27.50 -                else
   27.51 -                    spin_unlock(&dpci->dirq_lock);
   27.52              }
   27.53          }
   27.54      }
   27.55 +    spin_unlock(&d->event_lock);
   27.56  }
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/drivers/pci/Makefile	Fri Oct 10 12:06:46 2008 +0900
    28.3 @@ -0,0 +1,1 @@
    28.4 +obj-y += pci.o
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/drivers/pci/pci.c	Fri Oct 10 12:06:46 2008 +0900
    29.3 @@ -0,0 +1,64 @@
    29.4 +/******************************************************************************
    29.5 + * pci.c
    29.6 + *
    29.7 + * Architecture-independent PCI access functions.
    29.8 + */
    29.9 +
   29.10 +#include <xen/pci.h>
   29.11 +#include <xen/pci_regs.h>
   29.12 +
   29.13 +int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap)
   29.14 +{
   29.15 +    u8 id;
   29.16 +    int max_cap = 48;
   29.17 +    u8 pos = PCI_CAPABILITY_LIST;
   29.18 +    u16 status;
   29.19 +
   29.20 +    status = pci_conf_read16(bus, dev, func, PCI_STATUS);
   29.21 +    if ( (status & PCI_STATUS_CAP_LIST) == 0 )
   29.22 +        return 0;
   29.23 +
   29.24 +    while ( max_cap-- )
   29.25 +    {
   29.26 +        pos = pci_conf_read8(bus, dev, func, pos);
   29.27 +        if ( pos < 0x40 )
   29.28 +            break;
   29.29 +
   29.30 +        pos &= ~3;
   29.31 +        id = pci_conf_read8(bus, dev, func, pos + PCI_CAP_LIST_ID);
   29.32 +
   29.33 +        if ( id == 0xff )
   29.34 +            break;
   29.35 +        else if ( id == cap )
   29.36 +            return pos;
   29.37 +
   29.38 +        pos += PCI_CAP_LIST_NEXT;
   29.39 +    }
   29.40 +
   29.41 +    return 0;
   29.42 +}
   29.43 +
   29.44 +int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap)
   29.45 +{
   29.46 +    u8 id;
   29.47 +    int ttl = 48;
   29.48 +
   29.49 +    while ( ttl-- )
   29.50 +    {
   29.51 +        pos = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
   29.52 +        if ( pos < 0x40 )
   29.53 +            break;
   29.54 +
   29.55 +        pos &= ~3;
   29.56 +        id = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   29.57 +                            pos + PCI_CAP_LIST_ID);
   29.58 +
   29.59 +        if ( id == 0xff )
   29.60 +            break;
   29.61 +        if ( id == cap )
   29.62 +            return pos;
   29.63 +
   29.64 +        pos += PCI_CAP_LIST_NEXT;
   29.65 +    }
   29.66 +    return 0;
   29.67 +}
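
A hedged usage sketch for the two helpers above, assuming PCI_CAP_ID_MSI
from xen/pci_regs.h and PCI_DEVFN() from xen/pci.h; the wrapper name is
illustrative.  Note that pci_find_next_cap() dereferences the offset it is
given as a "next capability" pointer, so the caller passes the offset of
the current capability's next field:

    /* Sketch only: find the MSI capability, then check whether a second
     * instance of the same capability id exists further down the list. */
    static int find_msi_caps_sketch(u8 bus, u8 dev, u8 func)
    {
        int pos = pci_find_cap_offset(bus, dev, func, PCI_CAP_ID_MSI);

        if ( pos == 0 )
            return 0;  /* no capability list, or no MSI capability */

        return pci_find_next_cap(bus, PCI_DEVFN(dev, func),
                                 pos + PCI_CAP_LIST_NEXT, PCI_CAP_ID_MSI);
    }
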
    30.1 --- a/xen/include/asm-x86/domain.h	Fri Oct 10 11:58:03 2008 +0900
    30.2 +++ b/xen/include/asm-x86/domain.h	Fri Oct 10 12:06:46 2008 +0900
    30.3 @@ -235,7 +235,7 @@ struct arch_domain
    30.4      /* Shadow translated domain: P2M mapping */
    30.5      pagetable_t phys_table;
    30.6  
    30.7 -    /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
    30.8 +    /* NB. protected by d->event_lock and by irq_desc[vector].lock */
    30.9      int vector_pirq[NR_VECTORS];
   30.10      int pirq_vector[NR_PIRQS];
   30.11  
    31.1 --- a/xen/include/asm-x86/hvm/irq.h	Fri Oct 10 11:58:03 2008 +0900
    31.2 +++ b/xen/include/asm-x86/hvm/irq.h	Fri Oct 10 12:06:46 2008 +0900
    31.3 @@ -25,6 +25,7 @@
    31.4  #include <xen/types.h>
    31.5  #include <xen/spinlock.h>
    31.6  #include <asm/irq.h>
    31.7 +#include <asm/pirq.h>
    31.8  #include <asm/hvm/hvm.h>
    31.9  #include <asm/hvm/vpic.h>
   31.10  #include <asm/hvm/vioapic.h>
   31.11 @@ -38,8 +39,6 @@ struct dev_intx_gsi_link {
   31.12      uint8_t link;
   31.13  };
   31.14  
   31.15 -#define HVM_IRQ_DPCI_VALID 0x1
   31.16 -#define HVM_IRQ_DPCI_MSI   0x2
   31.17  #define _HVM_IRQ_DPCI_MSI  0x1
   31.18  
   31.19  struct hvm_gmsi_info {
   31.20 @@ -64,9 +63,10 @@ struct hvm_girq_dpci_mapping {
   31.21  
   31.22  #define NR_ISAIRQS  16
   31.23  #define NR_LINK     4
   31.24 +/* Protected by domain's event_lock */
   31.25  struct hvm_irq_dpci {
   31.26 -    spinlock_t dirq_lock;
   31.27      /* Machine IRQ to guest device/intx mapping. */
   31.28 +    DECLARE_BITMAP(mapping, NR_PIRQS);
   31.29      struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
   31.30      /* Guest IRQ to guest device/intx mapping. */
   31.31      struct hvm_girq_dpci_mapping girq[NR_IRQS];
    32.1 --- a/xen/include/asm-x86/io_apic.h	Fri Oct 10 11:58:03 2008 +0900
    32.2 +++ b/xen/include/asm-x86/io_apic.h	Fri Oct 10 12:06:46 2008 +0900
    32.3 @@ -190,5 +190,6 @@ static inline int ioapic_resume(void) {r
    32.4  #endif
    32.5  
    32.6  extern int assign_irq_vector(int irq);
    32.7 +extern int free_irq_vector(int vector);
    32.8  
    32.9  #endif
    33.1 --- a/xen/include/asm-x86/irq.h	Fri Oct 10 11:58:03 2008 +0900
    33.2 +++ b/xen/include/asm-x86/irq.h	Fri Oct 10 12:06:46 2008 +0900
    33.3 @@ -19,7 +19,9 @@
    33.4  
    33.5  extern int vector_irq[NR_VECTORS];
    33.6  extern u8 irq_vector[NR_IRQ_VECTORS];
    33.7 -#define AUTO_ASSIGN             -1
    33.8 +#define AUTO_ASSIGN    -1
    33.9 +#define NEVER_ASSIGN   -2
   33.10 +#define FREE_TO_ASSIGN -3
   33.11  
   33.12  #define platform_legacy_irq(irq)	((irq) < 16)
   33.13  
   33.14 @@ -52,6 +54,12 @@ extern atomic_t irq_mis_count;
   33.15  int pirq_acktype(struct domain *d, int irq);
   33.16  int pirq_shared(struct domain *d , int irq);
   33.17  
   33.18 +int map_domain_pirq(struct domain *d, int pirq, int vector, int type,
   33.19 +                    void *data);
   33.20 +int unmap_domain_pirq(struct domain *d, int pirq);
   33.21 +int get_free_pirq(struct domain *d, int type, int index);
   33.22 +void free_domain_pirqs(struct domain *d);
   33.23 +
   33.24  #define domain_irq_to_vector(d, irq) ((d)->arch.pirq_vector[(irq)])
   33.25  #define domain_vector_to_irq(d, vec) ((d)->arch.vector_pirq[(vec)])
   33.26  
    34.1 --- a/xen/include/xen/irq.h	Fri Oct 10 11:58:03 2008 +0900
    34.2 +++ b/xen/include/xen/irq.h	Fri Oct 10 12:06:46 2008 +0900
    34.3 @@ -78,6 +78,8 @@ extern int pirq_guest_eoi(struct domain 
    34.4  extern int pirq_guest_unmask(struct domain *d);
    34.5  extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
    34.6  extern void pirq_guest_unbind(struct domain *d, int irq);
    34.7 +extern irq_desc_t *domain_spin_lock_irq_desc(
    34.8 +    struct domain *d, int irq, unsigned long *pflags);
    34.9  
   34.10  static inline void set_native_irq_info(int irq, cpumask_t mask)
   34.11  {
    35.1 --- a/xen/include/xen/sched.h	Fri Oct 10 11:58:03 2008 +0900
    35.2 +++ b/xen/include/xen/sched.h	Fri Oct 10 12:06:46 2008 +0900
    35.3 @@ -188,7 +188,7 @@ struct domain
    35.4  
    35.5      /* Event channel information. */
    35.6      struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    35.7 -    spinlock_t       evtchn_lock;
    35.8 +    spinlock_t       event_lock;
    35.9  
   35.10      struct grant_table *grant_table;
   35.11  
    36.1 --- a/xen/xsm/acm/acm_simple_type_enforcement_hooks.c	Fri Oct 10 11:58:03 2008 +0900
    36.2 +++ b/xen/xsm/acm/acm_simple_type_enforcement_hooks.c	Fri Oct 10 12:06:46 2008 +0900
    36.3 @@ -248,11 +248,11 @@ ste_init_state(struct acm_sized_buffer *
    36.4          /* a) check for event channel conflicts */
    36.5          for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
    36.6          {
    36.7 -            spin_lock(&d->evtchn_lock);
    36.8 +            spin_lock(&d->event_lock);
    36.9              ports = d->evtchn[bucket];
   36.10              if ( ports == NULL)
   36.11              {
   36.12 -                spin_unlock(&d->evtchn_lock);
   36.13 +                spin_unlock(&d->event_lock);
   36.14                  break;
   36.15              }
   36.16  
   36.17 @@ -280,7 +280,7 @@ ste_init_state(struct acm_sized_buffer *
   36.18                      printkd("%s: Policy violation in event channel domain "
   36.19                              "%x -> domain %x.\n",
   36.20                              __func__, d->domain_id, rdomid);
   36.21 -                    spin_unlock(&d->evtchn_lock);
   36.22 +                    spin_unlock(&d->event_lock);
   36.23  
   36.24                      acm_array_append_tuple(errors,
   36.25                                             ACM_EVTCHN_SHARING_VIOLATION,
   36.26 @@ -288,7 +288,7 @@ ste_init_state(struct acm_sized_buffer *
   36.27                      goto out;
   36.28                  }
   36.29              }
   36.30 -            spin_unlock(&d->evtchn_lock);
   36.31 +            spin_unlock(&d->event_lock);
   36.32          } 
   36.33  
   36.34