ia64/xen-unstable

changeset 18860:f4c5befcba8d

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Thu Dec 04 11:01:53 2008 +0900 (2008-12-04)
parents 54e5d15af567 7338f6301067
children c15577ad46f2
line diff
     1.1 --- a/stubdom/grub/mini-os.c	Wed Dec 03 11:43:54 2008 +0900
     1.2 +++ b/stubdom/grub/mini-os.c	Thu Dec 04 11:01:53 2008 +0900
     1.3 @@ -649,7 +649,7 @@ int getrtsecs (void)
     1.4  {
     1.5      struct timeval tv;
     1.6      gettimeofday(&tv, NULL);
     1.7 -    return tv.tv_sec;
     1.8 +    return tv.tv_sec % 10 + ((tv.tv_sec / 10) % 6) * 0x10;
     1.9  }
    1.10  
    1.11  int currticks (void)
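
[Editor's note] The new getrtsecs() return value packs the seconds within the current minute as two binary-coded-decimal digits: the ones digit in the low nibble and the tens digit (0-5) in the high nibble, presumably because callers expect the BCD value a real RTC seconds register would return. A minimal standalone sketch of the arithmetic; to_bcd_seconds() is an illustrative name, not part of the patch:

    #include <assert.h>

    /* Pack seconds-within-minute as two BCD digits: low nibble holds
     * the ones digit, high nibble the tens digit (0-5). */
    static int to_bcd_seconds(long tv_sec)
    {
        return tv_sec % 10 + ((tv_sec / 10) % 6) * 0x10;
    }

    int main(void)
    {
        assert(to_bcd_seconds(59) == 0x59);  /* 59s -> BCD 0x59 */
        assert(to_bcd_seconds(7)  == 0x07);  /*  7s -> BCD 0x07 */
        assert(to_bcd_seconds(61) == 0x01);  /* wraps at the minute */
        return 0;
    }
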
     2.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Wed Dec 03 11:43:54 2008 +0900
     2.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Thu Dec 04 11:01:53 2008 +0900
     2.3 @@ -1990,13 +1990,21 @@ class XendDomainInfo:
     2.4              for devclass in XendDevices.valid_devices():
     2.5                  for dev in t.list(devclass):
     2.6                      try:
     2.7 +                        true_devclass = devclass
     2.8 +                        if devclass == 'vbd':
     2.9 +                            # In the case of "vbd", the true device class
    2.10 +                            # may actually be "tap", so verify the device
    2.11 +                            # class before destroying it.
    2.12 +                            devid = dev.split('/')[-1]
    2.13 +                            true_devclass = self.getBlockDeviceClass(devid)
    2.14                          log.debug("Removing %s", dev);
    2.15 -                        self.destroyDevice(devclass, dev, False);
    2.16 +                        self.destroyDevice(true_devclass, dev, False);
    2.17                      except:
    2.18                          # Log and swallow any exceptions in removal --
    2.19                          # there's nothing more we can do.
    2.20                          log.exception("Device release failed: %s; %s; %s",
    2.21 -                                      self.info['name_label'], devclass, dev)
    2.22 +                                      self.info['name_label'],
    2.23 +                                      true_devclass, dev)
    2.24          finally:
    2.25              t.abort()
    2.26  
     3.1 --- a/tools/python/xen/xm/addlabel.py	Wed Dec 03 11:43:54 2008 +0900
     3.2 +++ b/tools/python/xen/xm/addlabel.py	Thu Dec 04 11:01:53 2008 +0900
     3.3 @@ -64,12 +64,13 @@ def validate_config_file(configfile):
     3.4          return 0
     3.5  
     3.6      # sanity check on the data from the file
     3.7 +    # requiring 'memory', 'name', and either 'kernel' or 'bootloader'
     3.8      count = 0
     3.9 -    required = ['kernel', 'memory', 'name']
    3.10 +    required = ['kernel', 'bootloader', 'memory', 'name']
    3.11      for (k, v) in locs.items():
    3.12          if k in required:
    3.13              count += 1
    3.14 -    if count != 3:
    3.15 +    if count < len(required) - 1:
    3.16          print "Invalid configuration file."
    3.17          return 0
    3.18      else:
     4.1 --- a/tools/xcutils/xc_save.c	Wed Dec 03 11:43:54 2008 +0900
     4.2 +++ b/tools/xcutils/xc_save.c	Thu Dec 04 11:01:53 2008 +0900
     4.3 @@ -24,8 +24,11 @@
     4.4  #include <xenguest.h>
     4.5  
     4.6  static struct suspendinfo {
     4.7 +    int xc_fd; /* libxc handle */
     4.8      int xce; /* event channel handle */
     4.9      int suspend_evtchn;
    4.10 +    int domid;
    4.11 +    unsigned int flags;
    4.12  } si;
    4.13  
    4.14  /**
    4.15 @@ -161,6 +164,19 @@ static int evtchn_suspend(void)
    4.16  
    4.17  static int suspend(void)
    4.18  {
    4.19 +    unsigned long sx_state = 0;
    4.20 +
    4.21 +    /* Nothing to do if the guest is in an ACPI sleep state. */
    4.22 +    if (si.flags & XCFLAGS_HVM)
    4.23 +        xc_get_hvm_param(si.xc_fd, si.domid,
    4.24 +                         HVM_PARAM_ACPI_S_STATE, &sx_state);
    4.25 +    if (sx_state != 0) {
    4.26 +        /* notify xend that it can do device migration */
    4.27 +        printf("suspended\n");
    4.28 +        fflush(stdout);
    4.29 +        return 1;
    4.30 +    }
    4.31 +
    4.32      if (si.suspend_evtchn >= 0)
    4.33          return evtchn_suspend();
    4.34  
    4.35 @@ -297,32 +313,32 @@ static void *init_qemu_maps(int domid, u
    4.36  int
    4.37  main(int argc, char **argv)
    4.38  {
    4.39 -    unsigned int domid, maxit, max_f, flags; 
    4.40 -    int xc_fd, io_fd, ret;
    4.41 +    unsigned int maxit, max_f;
    4.42 +    int io_fd, ret;
    4.43  
    4.44      if (argc != 6)
    4.45          errx(1, "usage: %s iofd domid maxit maxf flags", argv[0]);
    4.46  
    4.47 -    xc_fd = xc_interface_open();
    4.48 -    if (xc_fd < 0)
    4.49 +    si.xc_fd = xc_interface_open();
    4.50 +    if (si.xc_fd < 0)
    4.51          errx(1, "failed to open control interface");
    4.52  
    4.53      io_fd = atoi(argv[1]);
    4.54 -    domid = atoi(argv[2]);
    4.55 +    si.domid = atoi(argv[2]);
    4.56      maxit = atoi(argv[3]);
    4.57      max_f = atoi(argv[4]);
    4.58 -    flags = atoi(argv[5]);
    4.59 +    si.flags = atoi(argv[5]);
    4.60  
    4.61 -    if (suspend_evtchn_init(xc_fd, domid) < 0)
    4.62 +    if (suspend_evtchn_init(si.xc_fd, si.domid) < 0)
    4.63          warnx("suspend event channel initialization failed, using slow path");
    4.64  
    4.65 -    ret = xc_domain_save(xc_fd, io_fd, domid, maxit, max_f, flags, 
    4.66 -                         &suspend, !!(flags & XCFLAGS_HVM),
    4.67 +    ret = xc_domain_save(si.xc_fd, io_fd, si.domid, maxit, max_f, si.flags, 
    4.68 +                         &suspend, !!(si.flags & XCFLAGS_HVM),
    4.69                           &init_qemu_maps, &qemu_flip_buffer);
    4.70  
    4.71      suspend_evtchn_release();
    4.72  
    4.73 -    xc_interface_close(xc_fd);
    4.74 +    xc_interface_close(si.xc_fd);
    4.75  
    4.76      return ret;
    4.77  }
     5.1 --- a/tools/xenpmd/xenpmd.c	Wed Dec 03 11:43:54 2008 +0900
     5.2 +++ b/tools/xenpmd/xenpmd.c	Thu Dec 04 11:01:53 2008 +0900
     5.3 @@ -373,7 +373,7 @@ void write_battery_info_to_xenstore(stru
     5.4               (unsigned int)strlen(info->serial_number), info->serial_number,
     5.5               (unsigned int)strlen(info->battery_type), info->battery_type,
     5.6               (unsigned int)strlen(info->oem_info), info->oem_info);
     5.7 -    strncat(val+73, string_info, 1024);
     5.8 +    strncat(val+73, string_info, 1024-73-1);
     5.9      xs_write(xs, XBT_NULL, "/pm/bif", 
    5.10               val, 73+8+strlen(info->model_number)+strlen(info->serial_number)+
    5.11               strlen(info->battery_type)+strlen(info->oem_info)+1);
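
[Editor's note] The strncat() fix above corrects a classic misuse: the third argument bounds how many characters are appended after the existing contents, not the total size of the destination, and strncat() always writes one terminating NUL beyond that. With a 1024-byte buffer already holding 73 bytes, 1024-73-1 is the most that may safely be appended. A self-contained illustration with a smaller, hypothetical buffer:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char val[16] = "0123456789";   /* 10 of 16 bytes used */
        const char *extra = "ABCDEFGHIJ";

        /* Wrong: strncat(val, extra, sizeof(val)) could write well past
         * the end of val, because the limit counts appended characters.
         * Right: remaining space, minus 1 for the NUL strncat appends. */
        strncat(val, extra, sizeof(val) - strlen(val) - 1);

        printf("%s\n", val);           /* prints "0123456789ABCDE" */
        return 0;
    }
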
     6.1 --- a/tools/xenstat/libxenstat/src/xenstat_linux.c	Wed Dec 03 11:43:54 2008 +0900
     6.2 +++ b/tools/xenstat/libxenstat/src/xenstat_linux.c	Thu Dec 04 11:01:53 2008 +0900
     6.3 @@ -182,12 +182,6 @@ int xenstat_collect_vbds(xenstat_node * 
     6.4  	struct dirent *dp;
     6.5  	struct priv_data *priv = get_priv_data(node->handle);
     6.6  
     6.7 -	char *sys_prefix = "statistics/";
     6.8 -
     6.9 -	/* 23 = "statistics/" + "xxxx_xx_req" */
    6.10 -	char ooreq[23], rdreq[23], wrreq[23]; 
    6.11 -	char *stat_prefix = NULL;
    6.12 -
    6.13  	if (priv == NULL) {
    6.14  		perror("Allocation error");
    6.15  		return 0;
    6.16 @@ -215,16 +209,12 @@ int xenstat_collect_vbds(xenstat_node * 
    6.17  		if (ret != 3)
    6.18  			continue;
    6.19  
    6.20 -
    6.21 -		if (strcmp(buf,"vbd") == 0){
    6.22 -			stat_prefix = "";
    6.23 +		if (strcmp(buf,"vbd") == 0)
    6.24  			vbd.back_type = 1;
    6.25 -		} else if (strcmp(buf,"tap") == 0){
    6.26 -			stat_prefix = "tap_";
    6.27 +		else if (strcmp(buf,"tap") == 0)
    6.28  			vbd.back_type = 2;
    6.29 -		} else {
    6.30 +		else
    6.31  			continue;
    6.32 -		}
    6.33  
    6.34  		domain = xenstat_node_domain(node, domid);
    6.35  		if (domain == NULL) {
    6.36 @@ -235,22 +225,19 @@ int xenstat_collect_vbds(xenstat_node * 
    6.37  			continue;
    6.38  		}
    6.39  
    6.40 -		snprintf(ooreq, sizeof(ooreq), "%s%soo_req", sys_prefix, stat_prefix);
    6.41 -		if((read_attributes_vbd(dp->d_name, ooreq, buf, 256)<=0)
    6.42 +		if((read_attributes_vbd(dp->d_name, "statistics/oo_req", buf, 256)<=0)
    6.43  		   || ((ret = sscanf(buf, "%llu", &vbd.oo_reqs)) != 1))
    6.44  		{
    6.45  			continue;
    6.46  		}
    6.47  
    6.48 -		snprintf(rdreq,  sizeof(rdreq),"%s%srd_req", sys_prefix, stat_prefix);
    6.49 -		if((read_attributes_vbd(dp->d_name, rdreq, buf, 256)<=0)
    6.50 +		if((read_attributes_vbd(dp->d_name, "statistics/rd_req", buf, 256)<=0)
    6.51  		   || ((ret = sscanf(buf, "%llu", &vbd.rd_reqs)) != 1))
    6.52  		{
    6.53  			continue;
    6.54  		}
    6.55  
    6.56 -		snprintf(wrreq,  sizeof(wrreq),"%s%swr_req", sys_prefix, stat_prefix);
    6.57 -		if((read_attributes_vbd(dp->d_name, wrreq, buf, 256)<=0)
    6.58 +		if((read_attributes_vbd(dp->d_name, "statistics/wr_req", buf, 256)<=0)
    6.59  		   || ((ret = sscanf(buf, "%llu", &vbd.wr_reqs)) != 1))
    6.60  		{
    6.61  			continue;
     7.1 --- a/xen/arch/ia64/xen/domain.c	Wed Dec 03 11:43:54 2008 +0900
     7.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Dec 04 11:01:53 2008 +0900
     7.3 @@ -1686,9 +1686,6 @@ int domain_relinquish_resources(struct d
     7.4  	if (is_hvm_domain(d) && d->arch.sal_data)
     7.5  		xfree(d->arch.sal_data);
     7.6  
     7.7 -	/* Free page used by xen oprofile buffer */
     7.8 -	free_xenoprof_pages(d);
     7.9 -
    7.10  	return 0;
    7.11  }
    7.12  
     8.1 --- a/xen/arch/x86/boot/wakeup.S	Wed Dec 03 11:43:54 2008 +0900
     8.2 +++ b/xen/arch/x86/boot/wakeup.S	Thu Dec 04 11:01:53 2008 +0900
     8.3 @@ -50,8 +50,7 @@ 1:      # Show some progress if VGA is r
     8.4  
     8.5          movw    $1, %ax
     8.6          lmsw    %ax             # Turn on CR0.PE 
     8.7 -        jmp     1f
     8.8 -1:      ljmpl   $BOOT_CS32, $bootsym_phys(wakeup_32)
     8.9 +        ljmpl   $BOOT_CS32, $bootsym_phys(wakeup_32)
    8.10  
    8.11  /* This code uses an extended set of video mode numbers. These include:
    8.12   * Aliases for standard modes
     9.1 --- a/xen/arch/x86/domain.c	Wed Dec 03 11:43:54 2008 +0900
     9.2 +++ b/xen/arch/x86/domain.c	Thu Dec 04 11:01:53 2008 +0900
     9.3 @@ -1814,6 +1814,13 @@ int domain_relinquish_resources(struct d
     9.4              unmap_vcpu_info(v);
     9.5          }
     9.6  
     9.7 +        if ( d->arch.pirq_eoi_map != NULL )
     9.8 +        {
     9.9 +            unmap_domain_page_global(d->arch.pirq_eoi_map);
    9.10 +            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
    9.11 +            d->arch.pirq_eoi_map = NULL;
    9.12 +        }
    9.13 +
    9.14          d->arch.relmem = RELMEM_xen;
    9.15          /* fallthrough */
    9.16  
    9.17 @@ -1857,9 +1864,6 @@ int domain_relinquish_resources(struct d
    9.18          BUG();
    9.19      }
    9.20  
    9.21 -    /* Free page used by xen oprofile buffer. */
    9.22 -    free_xenoprof_pages(d);
    9.23 -
    9.24      if ( is_hvm_domain(d) )
    9.25          hvm_domain_relinquish_resources(d);
    9.26  
    10.1 --- a/xen/arch/x86/domctl.c	Wed Dec 03 11:43:54 2008 +0900
    10.2 +++ b/xen/arch/x86/domctl.c	Thu Dec 04 11:01:53 2008 +0900
    10.3 @@ -326,13 +326,9 @@ long arch_do_domctl(
    10.4  
    10.5      case XEN_DOMCTL_sethvmcontext:
    10.6      { 
    10.7 -        struct hvm_domain_context c;
    10.8 -        struct domain             *d;
    10.9 +        struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
   10.10 +        struct domain *d;
   10.11  
   10.12 -        c.cur = 0;
   10.13 -        c.size = domctl->u.hvmcontext.size;
   10.14 -        c.data = NULL;
   10.15 -        
   10.16          ret = -ESRCH;
   10.17          if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
   10.18              break;
   10.19 @@ -367,8 +363,8 @@ long arch_do_domctl(
   10.20  
   10.21      case XEN_DOMCTL_gethvmcontext:
   10.22      { 
   10.23 -        struct hvm_domain_context c;
   10.24 -        struct domain             *d;
   10.25 +        struct hvm_domain_context c = { 0 };
   10.26 +        struct domain *d;
   10.27  
   10.28          ret = -ESRCH;
   10.29          if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
   10.30 @@ -382,9 +378,7 @@ long arch_do_domctl(
   10.31          if ( !is_hvm_domain(d) ) 
   10.32              goto gethvmcontext_out;
   10.33  
   10.34 -        c.cur = 0;
   10.35          c.size = hvm_save_size(d);
   10.36 -        c.data = NULL;
   10.37  
   10.38          if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
   10.39          {
    11.1 --- a/xen/arch/x86/hpet.c	Wed Dec 03 11:43:54 2008 +0900
    11.2 +++ b/xen/arch/x86/hpet.c	Thu Dec 04 11:01:53 2008 +0900
    11.3 @@ -273,6 +273,9 @@ u64 hpet_setup(void)
    11.4          return hpet_rate;
    11.5      system_reset_latch = system_reset_counter;
    11.6  
    11.7 +    if ( hpet_address == 0 )
    11.8 +        return 0;
    11.9 +
   11.10      set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
   11.11  
   11.12      hpet_id = hpet_read32(HPET_ID);
    12.1 --- a/xen/arch/x86/irq.c	Wed Dec 03 11:43:54 2008 +0900
    12.2 +++ b/xen/arch/x86/irq.c	Thu Dec 04 11:01:53 2008 +0900
    12.3 @@ -18,6 +18,7 @@
    12.4  #include <xen/iommu.h>
    12.5  #include <asm/msi.h>
    12.6  #include <asm/current.h>
    12.7 +#include <asm/flushtlb.h>
    12.8  #include <public/physdev.h>
    12.9  
   12.10  /* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
   12.11 @@ -206,16 +207,42 @@ struct pending_eoi {
   12.12  static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
   12.13  #define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)
   12.14  
   12.15 +static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
   12.16 +{
   12.17 +    if ( d->arch.pirq_eoi_map )
   12.18 +        set_bit(irq, d->arch.pirq_eoi_map);
   12.19 +}
   12.20 +
   12.21 +static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
   12.22 +{
   12.23 +    if ( d->arch.pirq_eoi_map )
   12.24 +        clear_bit(irq, d->arch.pirq_eoi_map);
   12.25 +}
   12.26 +
   12.27 +static void _irq_guest_eoi(irq_desc_t *desc)
   12.28 +{
   12.29 +    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
   12.30 +    unsigned int i, vector = desc - irq_desc;
   12.31 +
   12.32 +    if ( !(desc->status & IRQ_GUEST_EOI_PENDING) )
   12.33 +        return;
   12.34 +
   12.35 +    for ( i = 0; i < action->nr_guests; ++i )
   12.36 +        clear_pirq_eoi(action->guest[i],
   12.37 +                       domain_vector_to_irq(action->guest[i], vector));
   12.38 +
   12.39 +    desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
   12.40 +    desc->handler->enable(vector);
   12.41 +}
   12.42 +
   12.43  static struct timer irq_guest_eoi_timer[NR_VECTORS];
   12.44  static void irq_guest_eoi_timer_fn(void *data)
   12.45  {
   12.46      irq_desc_t *desc = data;
   12.47 -    unsigned vector = desc - irq_desc;
   12.48      unsigned long flags;
   12.49  
   12.50      spin_lock_irqsave(&desc->lock, flags);
   12.51 -    desc->status &= ~IRQ_INPROGRESS;
   12.52 -    desc->handler->enable(vector);
   12.53 +    _irq_guest_eoi(desc);
   12.54      spin_unlock_irqrestore(&desc->lock, flags);
   12.55  }
   12.56  
   12.57 @@ -272,8 +299,22 @@ static void __do_IRQ_guest(int vector)
   12.58  
   12.59      if ( already_pending == action->nr_guests )
   12.60      {
   12.61 +        stop_timer(&irq_guest_eoi_timer[vector]);
   12.62          desc->handler->disable(vector);
   12.63 -        stop_timer(&irq_guest_eoi_timer[vector]);
   12.64 +        desc->status |= IRQ_GUEST_EOI_PENDING;
   12.65 +        for ( i = 0; i < already_pending; ++i )
   12.66 +        {
   12.67 +            d = action->guest[i];
   12.68 +            set_pirq_eoi(d, domain_vector_to_irq(d, vector));
   12.69 +            /*
   12.70 +             * Could check here whether the guest unmasked the event by now
   12.71 +             * (or perhaps just re-issue the send_guest_pirq()), and if it
   12.72 +             * can now accept the event,
   12.73 +             * - clear all the pirq_eoi bits we already set,
   12.74 +             * - re-enable the vector, and
   12.75 +             * - skip the timer setup below.
   12.76 +             */
   12.77 +        }
   12.78          init_timer(&irq_guest_eoi_timer[vector],
   12.79                     irq_guest_eoi_timer_fn, desc, smp_processor_id());
   12.80          set_timer(&irq_guest_eoi_timer[vector], NOW() + MILLISECS(1));
   12.81 @@ -310,7 +351,7 @@ irq_desc_t *domain_spin_lock_irq_desc(
   12.82  }
   12.83  
   12.84  /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
   12.85 -static void flush_ready_eoi(void *unused)
   12.86 +static void flush_ready_eoi(void)
   12.87  {
   12.88      struct pending_eoi *peoi = this_cpu(pending_eoi);
   12.89      irq_desc_t         *desc;
   12.90 @@ -364,7 +405,7 @@ static void set_eoi_ready(void *data)
   12.91      __set_eoi_ready(desc);
   12.92      spin_unlock(&desc->lock);
   12.93  
   12.94 -    flush_ready_eoi(NULL);
   12.95 +    flush_ready_eoi();
   12.96  }
   12.97  
   12.98  static void __pirq_guest_eoi(struct domain *d, int irq)
   12.99 @@ -382,8 +423,12 @@ static void __pirq_guest_eoi(struct doma
  12.100      action = (irq_guest_action_t *)desc->action;
  12.101      vector = desc - irq_desc;
  12.102  
  12.103 -    ASSERT(!test_bit(irq, d->pirq_mask) ||
  12.104 -           (action->ack_type != ACKTYPE_NONE));
  12.105 +    if ( action->ack_type == ACKTYPE_NONE )
  12.106 +    {
  12.107 +        ASSERT(!test_bit(irq, d->pirq_mask));
  12.108 +        stop_timer(&irq_guest_eoi_timer[vector]);
  12.109 +        _irq_guest_eoi(desc);
  12.110 +    }
  12.111  
  12.112      if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
  12.113           unlikely(--action->in_flight != 0) )
  12.114 @@ -408,7 +453,7 @@ static void __pirq_guest_eoi(struct doma
  12.115      {
  12.116          __set_eoi_ready(desc);
  12.117          spin_unlock(&desc->lock);
  12.118 -        flush_ready_eoi(NULL);
  12.119 +        flush_ready_eoi();
  12.120          local_irq_enable();
  12.121      }
  12.122      else
  12.123 @@ -607,6 +652,11 @@ int pirq_guest_bind(struct vcpu *v, int 
  12.124  
  12.125      action->guest[action->nr_guests++] = v->domain;
  12.126  
  12.127 +    if ( action->ack_type != ACKTYPE_NONE )
  12.128 +        set_pirq_eoi(v->domain, irq);
  12.129 +    else
  12.130 +        clear_pirq_eoi(v->domain, irq);
  12.131 +
  12.132   unlock_out:
  12.133      spin_unlock_irq(&desc->lock);
  12.134   out:
  12.135 @@ -1050,6 +1100,6 @@ void fixup_irqs(cpumask_t map)
  12.136      peoi = this_cpu(pending_eoi);
  12.137      for ( sp = 0; sp < pending_eoi_sp(peoi); sp++ )
  12.138          peoi[sp].ready = 1;
  12.139 -    flush_ready_eoi(NULL);
  12.140 +    flush_ready_eoi();
  12.141  }
  12.142  #endif
    13.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Dec 03 11:43:54 2008 +0900
    13.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu Dec 04 11:01:53 2008 +0900
    13.3 @@ -1886,13 +1886,6 @@ static shadow_l1e_t * shadow_get_and_cre
    13.4          if ( r & SHADOW_SET_ERROR )
    13.5              return NULL;
    13.6  
    13.7 -#if (SHADOW_OPTIMIZATIONS && SHOPT_OUT_OF_SYNC )
    13.8 -        /* All pages walked are now pagetables. Safe to resync pages
    13.9 -           in case level 4 or 3 shadows were set. */
   13.10 -        if ( resync )
   13.11 -            shadow_resync_all(v, 0);
   13.12 -#endif
   13.13 -
   13.14          /* This next line is important: in 32-on-PAE and 32-on-64 modes,
   13.15           * the guest l1 table has an 8k shadow, and we need to return
   13.16           * the right mfn of the pair. This call will set it for us as a
   13.17 @@ -1900,6 +1893,14 @@ static shadow_l1e_t * shadow_get_and_cre
   13.18           * compiled out.) */
   13.19          (void) shadow_l1_index(sl1mfn, guest_l1_table_offset(gw->va));
   13.20      }
   13.21 +
   13.22 +#if (SHADOW_OPTIMIZATIONS && SHOPT_OUT_OF_SYNC )
   13.23 +    /* All pages walked are now pagetables. Safe to resync pages
   13.24 +       in case level 4 or 3 shadows were set. */
   13.25 +    if ( resync )
   13.26 +        shadow_resync_all(v, 0);
   13.27 +#endif
   13.28 +
   13.29      /* Now follow it down a level.  Guaranteed to succeed. */
   13.30      return sh_linear_l1_table(v) + shadow_l1_linear_offset(gw->va);
   13.31  }
   13.32 @@ -2176,7 +2177,8 @@ static int validate_gl4e(struct vcpu *v,
   13.33              result |= SHADOW_SET_ERROR;
   13.34  
   13.35  #if (SHADOW_OPTIMIZATIONS && SHOPT_OUT_OF_SYNC )
   13.36 -        shadow_resync_all(v, 0);
   13.37 +        if ( mfn_valid(sl3mfn) )
   13.38 +            shadow_resync_all(v, 0);
   13.39  #endif
   13.40      }
   13.41      l4e_propagate_from_guest(v, new_gl4e, sl3mfn, &new_sl4e, ft_prefetch);
   13.42 @@ -2232,7 +2234,8 @@ static int validate_gl3e(struct vcpu *v,
   13.43              result |= SHADOW_SET_ERROR;
   13.44  
   13.45  #if (SHADOW_OPTIMIZATIONS && SHOPT_OUT_OF_SYNC )
   13.46 -        shadow_resync_all(v, 0);
   13.47 +        if ( mfn_valid(sl2mfn) )
   13.48 +            shadow_resync_all(v, 0);
   13.49  #endif
   13.50      }
   13.51      l3e_propagate_from_guest(v, new_gl3e, sl2mfn, &new_sl3e, ft_prefetch);
   13.52 @@ -2924,6 +2927,7 @@ static int sh_page_fault(struct vcpu *v,
   13.53                 writes to an out of sync page. */
   13.54              if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
   13.55              {
   13.56 +                fast_emul = 0;
   13.57                  v->arch.paging.last_write_emul_ok = 0;
   13.58                  goto page_fault_slow_path;
   13.59              }
    14.1 --- a/xen/arch/x86/physdev.c	Wed Dec 03 11:43:54 2008 +0900
    14.2 +++ b/xen/arch/x86/physdev.c	Thu Dec 04 11:01:53 2008 +0900
    14.3 @@ -14,6 +14,7 @@
    14.4  #include <public/xen.h>
    14.5  #include <public/physdev.h>
    14.6  #include <xsm/xsm.h>
    14.7 +#include <asm/p2m.h>
    14.8  
    14.9  #ifndef COMPAT
   14.10  typedef long ret_t;
   14.11 @@ -191,10 +192,52 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   14.12          ret = -EFAULT;
   14.13          if ( copy_from_guest(&eoi, arg, 1) != 0 )
   14.14              break;
   14.15 +        ret = -EINVAL;
   14.16 +        if ( eoi.irq < 0 || eoi.irq >= NR_IRQS )
   14.17 +            break;
   14.18 +        if ( v->domain->arch.pirq_eoi_map )
   14.19 +            evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
   14.20          ret = pirq_guest_eoi(v->domain, eoi.irq);
   14.21          break;
   14.22      }
   14.23  
   14.24 +    case PHYSDEVOP_pirq_eoi_gmfn: {
   14.25 +        struct physdev_pirq_eoi_gmfn info;
   14.26 +        unsigned long mfn;
   14.27 +
   14.28 +        BUILD_BUG_ON(NR_IRQS > (PAGE_SIZE * 8));
   14.29 +
   14.30 +        ret = -EFAULT;
   14.31 +        if ( copy_from_guest(&info, arg, 1) != 0 )
   14.32 +            break;
   14.33 +
   14.34 +        ret = -EINVAL;
   14.35 +        mfn = gmfn_to_mfn(current->domain, info.gmfn);
   14.36 +        if ( !mfn_valid(mfn) ||
   14.37 +             !get_page_and_type(mfn_to_page(mfn), v->domain,
   14.38 +                                PGT_writable_page) )
   14.39 +            break;
   14.40 +
   14.41 +        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
   14.42 +        {
   14.43 +            put_page_and_type(mfn_to_page(mfn));
   14.44 +            ret = -EBUSY;
   14.45 +            break;
   14.46 +        }
   14.47 +
   14.48 +        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
   14.49 +        if ( v->domain->arch.pirq_eoi_map == NULL )
   14.50 +        {
   14.51 +            v->domain->arch.pirq_eoi_map_mfn = 0;
   14.52 +            put_page_and_type(mfn_to_page(mfn));
   14.53 +            ret = -ENOSPC;
   14.54 +            break;
   14.55 +        }
   14.56 +
   14.57 +        ret = 0;
   14.58 +        break;
   14.59 +    }
   14.60 +
   14.61      /* Legacy since 0x00030202. */
   14.62      case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
   14.63          ret = pirq_guest_unmask(v->domain);
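
[Editor's note] The PHYSDEVOP_pirq_eoi_gmfn handler above registers the shared page at most once per domain by doing a compare-and-swap on pirq_eoi_map_mfn: only the caller that swaps 0 to mfn goes on to map the page, and any later caller gets -EBUSY and drops its page reference. A minimal C11 sketch of that once-only-latch idiom (claim() and the atomic variable are illustrative, not Xen code):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long map_mfn;   /* 0 means "not registered" */

    static int claim(unsigned long mfn)
    {
        unsigned long expected = 0;
        /* Exactly one caller wins the 0 -> mfn transition. */
        if (!atomic_compare_exchange_strong(&map_mfn, &expected, mfn))
            return -1;                      /* already claimed: -EBUSY */
        return 0;
    }

    int main(void)
    {
        int first  = claim(42);             /* wins:  returns 0  */
        int second = claim(43);             /* loses: returns -1 */
        printf("%d %d\n", first, second);   /* prints "0 -1" */
        return 0;
    }
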
    15.1 --- a/xen/arch/x86/x86_64/physdev.c	Wed Dec 03 11:43:54 2008 +0900
    15.2 +++ b/xen/arch/x86/x86_64/physdev.c	Thu Dec 04 11:01:53 2008 +0900
    15.3 @@ -18,6 +18,9 @@
    15.4  #define physdev_eoi                compat_physdev_eoi
    15.5  #define physdev_eoi_t              physdev_eoi_compat_t
    15.6  
    15.7 +#define physdev_pirq_eoi_gmfn      compat_physdev_pirq_eoi_gmfn
    15.8 +#define physdev_pirq_eoi_gmfn_t    physdev_pirq_eoi_gmfn_compat_t
    15.9 +
   15.10  #define physdev_set_iobitmap       compat_physdev_set_iobitmap
   15.11  #define physdev_set_iobitmap_t     physdev_set_iobitmap_compat_t
   15.12  
    16.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.c	Wed Dec 03 11:43:54 2008 +0900
    16.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.c	Thu Dec 04 11:01:53 2008 +0900
    16.3 @@ -28,6 +28,7 @@
    16.4  #define DstImplicit (0<<1) /* Destination operand is implicit in the opcode. */
    16.5  #define DstBitBase  (1<<1) /* Memory operand, bit string. */
    16.6  #define DstReg      (2<<1) /* Register operand. */
    16.7 +#define DstEax      DstReg /* Register EAX (aka DstReg with no ModRM) */
    16.8  #define DstMem      (3<<1) /* Memory operand. */
    16.9  #define DstMask     (3<<1)
   16.10  /* Source operand type. */
   16.11 @@ -51,35 +52,35 @@ static uint8_t opcode_table[256] = {
   16.12      /* 0x00 - 0x07 */
   16.13      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.14      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.15 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps,
   16.16 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps, ImplicitOps,
   16.17      /* 0x08 - 0x0F */
   16.18      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.19      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.20 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, 0,
   16.21 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps, 0,
   16.22      /* 0x10 - 0x17 */
   16.23      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.24      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.25 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps,
   16.26 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps, ImplicitOps,
   16.27      /* 0x18 - 0x1F */
   16.28      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.29      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.30 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps,
   16.31 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps, ImplicitOps,
   16.32      /* 0x20 - 0x27 */
   16.33      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.34      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.35 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps,
   16.36 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps,
   16.37      /* 0x28 - 0x2F */
   16.38      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.39      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.40 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps,
   16.41 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps,
   16.42      /* 0x30 - 0x37 */
   16.43      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.44      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.45 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps,
   16.46 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps,
   16.47      /* 0x38 - 0x3F */
   16.48      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.49      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
   16.50 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps,
   16.51 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps,
   16.52      /* 0x40 - 0x4F */
   16.53      ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
   16.54      ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
   16.55 @@ -125,7 +126,7 @@ static uint8_t opcode_table[256] = {
   16.56      ByteOp|ImplicitOps|Mov, ImplicitOps|Mov,
   16.57      ByteOp|ImplicitOps, ImplicitOps,
   16.58      /* 0xA8 - 0xAF */
   16.59 -    ByteOp|DstReg|SrcImm, DstReg|SrcImm,
   16.60 +    ByteOp|DstEax|SrcImm, DstEax|SrcImm,
   16.61      ByteOp|ImplicitOps|Mov, ImplicitOps|Mov,
   16.62      ByteOp|ImplicitOps|Mov, ImplicitOps|Mov,
   16.63      ByteOp|ImplicitOps, ImplicitOps,
   16.64 @@ -687,12 +688,12 @@ static void __put_rep_prefix(
   16.65  })
   16.66  
   16.67  /* Clip maximum repetitions so that the index register only just wraps. */
   16.68 -#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({                \
   16.69 -    unsigned long __todo = (ctxt->regs->eflags & EF_DF) ? (ea) : ~(ea); \
   16.70 -    __todo = truncate_word(__todo, ad_bytes);                           \
   16.71 -    __todo = (__todo / (bytes_per_rep)) + 1;                            \
   16.72 -    (reps) = (__todo < (reps)) ? __todo : (reps);                       \
   16.73 -    truncate_word((ea), ad_bytes);                                      \
   16.74 +#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({                  \
   16.75 +    unsigned long __todo = (ctxt->regs->eflags & EFLG_DF) ? (ea) : ~(ea); \
   16.76 +    __todo = truncate_word(__todo, ad_bytes);                             \
   16.77 +    __todo = (__todo / (bytes_per_rep)) + 1;                              \
   16.78 +    (reps) = (__todo < (reps)) ? __todo : (reps);                         \
   16.79 +    truncate_word((ea), ad_bytes);                                        \
   16.80  })
   16.81  
   16.82  /* Compatibility function: read guest memory, zero-extend result to a ulong. */
   16.83 @@ -1574,59 +1575,35 @@ x86_emulate(
   16.84  
   16.85      switch ( b )
   16.86      {
   16.87 -    case 0x04 ... 0x05: /* add imm,%%eax */
   16.88 -        dst.reg = (unsigned long *)&_regs.eax;
   16.89 -        dst.val = _regs.eax;
   16.90 -    case 0x00 ... 0x03: add: /* add */
   16.91 +    case 0x00 ... 0x05: add: /* add */
   16.92          emulate_2op_SrcV("add", src, dst, _regs.eflags);
   16.93          break;
   16.94  
   16.95 -    case 0x0c ... 0x0d: /* or imm,%%eax */
   16.96 -        dst.reg = (unsigned long *)&_regs.eax;
   16.97 -        dst.val = _regs.eax;
   16.98 -    case 0x08 ... 0x0b: or:  /* or */
   16.99 +    case 0x08 ... 0x0d: or:  /* or */
  16.100          emulate_2op_SrcV("or", src, dst, _regs.eflags);
  16.101          break;
  16.102  
  16.103 -    case 0x14 ... 0x15: /* adc imm,%%eax */
  16.104 -        dst.reg = (unsigned long *)&_regs.eax;
  16.105 -        dst.val = _regs.eax;
  16.106 -    case 0x10 ... 0x13: adc: /* adc */
  16.107 +    case 0x10 ... 0x15: adc: /* adc */
  16.108          emulate_2op_SrcV("adc", src, dst, _regs.eflags);
  16.109          break;
  16.110  
  16.111 -    case 0x1c ... 0x1d: /* sbb imm,%%eax */
  16.112 -        dst.reg = (unsigned long *)&_regs.eax;
  16.113 -        dst.val = _regs.eax;
  16.114 -    case 0x18 ... 0x1b: sbb: /* sbb */
  16.115 +    case 0x18 ... 0x1d: sbb: /* sbb */
  16.116          emulate_2op_SrcV("sbb", src, dst, _regs.eflags);
  16.117          break;
  16.118  
  16.119 -    case 0x24 ... 0x25: /* and imm,%%eax */
  16.120 -        dst.reg = (unsigned long *)&_regs.eax;
  16.121 -        dst.val = _regs.eax;
  16.122 -    case 0x20 ... 0x23: and: /* and */
  16.123 +    case 0x20 ... 0x25: and: /* and */
  16.124          emulate_2op_SrcV("and", src, dst, _regs.eflags);
  16.125          break;
  16.126  
  16.127 -    case 0x2c ... 0x2d: /* sub imm,%%eax */
  16.128 -        dst.reg = (unsigned long *)&_regs.eax;
  16.129 -        dst.val = _regs.eax;
  16.130 -    case 0x28 ... 0x2b: sub: /* sub */
  16.131 +    case 0x28 ... 0x2d: sub: /* sub */
  16.132          emulate_2op_SrcV("sub", src, dst, _regs.eflags);
  16.133          break;
  16.134  
  16.135 -    case 0x34 ... 0x35: /* xor imm,%%eax */
  16.136 -        dst.reg = (unsigned long *)&_regs.eax;
  16.137 -        dst.val = _regs.eax;
  16.138 -    case 0x30 ... 0x33: xor: /* xor */
  16.139 +    case 0x30 ... 0x35: xor: /* xor */
  16.140          emulate_2op_SrcV("xor", src, dst, _regs.eflags);
  16.141          break;
  16.142  
  16.143 -    case 0x3c ... 0x3d: /* cmp imm,%%eax */
  16.144 -        dst.reg = (unsigned long *)&_regs.eax;
  16.145 -        dst.val = _regs.eax;
  16.146 -    case 0x38 ... 0x3b: cmp: /* cmp */
  16.147 +    case 0x38 ... 0x3d: cmp: /* cmp */
  16.148          emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
  16.149          dst.type = OP_NONE;
  16.150          break;
  16.151 @@ -1988,8 +1965,6 @@ x86_emulate(
  16.152          break;
  16.153  
  16.154      case 0xa8 ... 0xa9: /* test imm,%%eax */
  16.155 -        dst.reg = (unsigned long *)&_regs.eax;
  16.156 -        dst.val = _regs.eax;
  16.157      case 0x84 ... 0x85: test: /* test */
  16.158          emulate_2op_SrcV("test", src, dst, _regs.eflags);
  16.159          dst.type = OP_NONE;
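
[Editor's note] The DstEax rework above moves the "imm,%eax" forms out of per-opcode fixups and into the decode table: entries flagged DstEax (an alias of DstReg for opcodes with no ModRM byte) have the common decode stage select EAX as the destination, so each arithmetic case collapses to a single range. A toy sketch of the table-driven idea, with illustrative names only:

    #include <stdint.h>
    #include <stdio.h>

    #define DST_EAX 0x01   /* illustrative flag, not the real bit */

    static const uint8_t opcode_flags[256] = {
        [0x04] = DST_EAX, [0x05] = DST_EAX,   /* add imm,%eax */
        [0x0c] = DST_EAX, [0x0d] = DST_EAX,   /* or  imm,%eax */
    };

    int main(void)
    {
        unsigned long eax = 7, scratch = 0;
        uint8_t op = 0x04;

        /* One common decode step replaces the removed per-case
         * "dst.reg = &_regs.eax; dst.val = _regs.eax;" fixups. */
        unsigned long *dst = (opcode_flags[op] & DST_EAX) ? &eax : &scratch;
        *dst += 3;                             /* models "add $3,%eax" */

        printf("eax=%lu\n", eax);              /* prints "eax=10" */
        return 0;
    }
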
    17.1 --- a/xen/common/domain.c	Wed Dec 03 11:43:54 2008 +0900
    17.2 +++ b/xen/common/domain.c	Thu Dec 04 11:01:53 2008 +0900
    17.3 @@ -553,6 +553,9 @@ static void complete_domain_destroy(stru
    17.4  
    17.5      sched_destroy_domain(d);
    17.6  
    17.7 +    /* Free page used by xen oprofile buffer. */
    17.8 +    free_xenoprof_pages(d);
    17.9 +
   17.10      for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
   17.11          if ( (v = d->vcpu[i]) != NULL )
   17.12              free_vcpu_struct(v);
    18.1 --- a/xen/common/event_channel.c	Wed Dec 03 11:43:54 2008 +0900
    18.2 +++ b/xen/common/event_channel.c	Thu Dec 04 11:01:53 2008 +0900
    18.3 @@ -762,10 +762,9 @@ long evtchn_bind_vcpu(unsigned int port,
    18.4  }
    18.5  
    18.6  
    18.7 -static long evtchn_unmask(evtchn_unmask_t *unmask)
    18.8 +int evtchn_unmask(unsigned int port)
    18.9  {
   18.10      struct domain *d = current->domain;
   18.11 -    int            port = unmask->port;
   18.12      struct vcpu   *v;
   18.13  
   18.14      spin_lock(&d->event_lock);
   18.15 @@ -916,7 +915,7 @@ long do_event_channel_op(int cmd, XEN_GU
   18.16          struct evtchn_unmask unmask;
   18.17          if ( copy_from_guest(&unmask, arg, 1) != 0 )
   18.18              return -EFAULT;
   18.19 -        rc = evtchn_unmask(&unmask);
   18.20 +        rc = evtchn_unmask(unmask.port);
   18.21          break;
   18.22      }
   18.23  
    19.1 --- a/xen/common/timer.c	Wed Dec 03 11:43:54 2008 +0900
    19.2 +++ b/xen/common/timer.c	Thu Dec 04 11:01:53 2008 +0900
    19.3 @@ -494,12 +494,14 @@ static void dump_timerq(unsigned char ke
    19.4          for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
    19.5          {
    19.6              t = ts->heap[j];
    19.7 -            printk ("  %d : %p ex=0x%08X%08X %p\n",
    19.8 -                    j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
    19.9 +            printk ("  %d : %p ex=0x%08X%08X %p %p\n",
   19.10 +                    j, t, (u32)(t->expires>>32), (u32)t->expires,
   19.11 +                    t->data, t->function);
   19.12          }
   19.13          for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
   19.14 -            printk (" L%d : %p ex=0x%08X%08X %p\n",
   19.15 -                    j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
   19.16 +            printk (" L%d : %p ex=0x%08X%08X %p %p\n",
   19.17 +                    j, t, (u32)(t->expires>>32), (u32)t->expires,
   19.18 +                    t->data, t->function);
   19.19          spin_unlock_irqrestore(&ts->lock, flags);
   19.20          printk("\n");
   19.21      }
    20.1 --- a/xen/drivers/char/console.c	Wed Dec 03 11:43:54 2008 +0900
    20.2 +++ b/xen/drivers/char/console.c	Thu Dec 04 11:01:53 2008 +0900
    20.3 @@ -927,7 +927,7 @@ void panic(const char *fmt, ...)
    20.4      console_start_sync();
    20.5      printk("\n****************************************\n");
    20.6      printk("Panic on CPU %d:\n", smp_processor_id());
    20.7 -    printk(buf);
    20.8 +    printk("%s", buf);
    20.9      printk("****************************************\n\n");
   20.10      if ( opt_noreboot )
   20.11          printk("Manual reset required ('noreboot' specified)\n");
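
[Editor's note] printk(buf) with a non-literal format string is a format-string bug: if the already-formatted panic message happens to contain '%', printk re-parses it as a conversion and reads nonexistent arguments. printk("%s", buf) prints the message verbatim. The same hazard in portable C:

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        snprintf(buf, sizeof(buf), "disk at 50%% full: %s", "remapping");

        /* printf(buf) would re-parse the '%' now embedded in buf and
         * fetch a stray vararg: undefined behaviour. */
        printf("%s\n", buf);   /* safe: prints the message verbatim */
        return 0;
    }
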
    21.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Wed Dec 03 11:43:54 2008 +0900
    21.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Thu Dec 04 11:01:53 2008 +0900
    21.3 @@ -152,13 +152,33 @@ static void __init set_iommu_translation
    21.4  {
    21.5      u32 entry;
    21.6  
    21.7 -    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    21.8 -    set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
    21.9 -                         IOMMU_CONTROL_ENABLED, entry,
   21.10 +    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
   21.11 +
   21.12 +    if ( enable )
   21.13 +    {
   21.14 +        set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
   21.15 +                         IOMMU_CONTROL_DISABLED, entry,
   21.16                           IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
   21.17                           IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
   21.18 +        set_field_in_reg_u32(iommu->isochronous ? IOMMU_CONTROL_ENABLED :
   21.19 +                         IOMMU_CONTROL_DISABLED, entry,
   21.20 +                         IOMMU_CONTROL_ISOCHRONOUS_MASK,
   21.21 +                         IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
   21.22 +        set_field_in_reg_u32(iommu->coherent ? IOMMU_CONTROL_ENABLED :
   21.23 +                         IOMMU_CONTROL_DISABLED, entry,
   21.24 +                         IOMMU_CONTROL_COHERENT_MASK,
   21.25 +                         IOMMU_CONTROL_COHERENT_SHIFT, &entry);
   21.26 +        set_field_in_reg_u32(iommu->res_pass_pw ? IOMMU_CONTROL_ENABLED :
   21.27 +                         IOMMU_CONTROL_DISABLED, entry,
   21.28 +                         IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
   21.29 +                         IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
   21.30 +        /* do not set PassPW bit */
   21.31 +        set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
   21.32 +                         IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
   21.33 +                         IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
   21.34 +    }
   21.35      set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   21.36 -                         IOMMU_CONTROL_ENABLED, entry,
   21.37 +                         IOMMU_CONTROL_DISABLED, entry,
   21.38                           IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
   21.39                           IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
   21.40      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   21.41 @@ -171,7 +191,7 @@ static void __init set_iommu_command_buf
   21.42  
   21.43      entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   21.44      set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   21.45 -                         IOMMU_CONTROL_ENABLED, entry,
   21.46 +                         IOMMU_CONTROL_DISABLED, entry,
   21.47                           IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
   21.48                           IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
   21.49      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   21.50 @@ -235,8 +255,7 @@ static void __init set_iommu_event_log_c
   21.51                           IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
   21.52      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   21.53  
   21.54 -    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   21.55 -                         IOMMU_CONTROL_DISABLED, entry,
   21.56 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
   21.57                           IOMMU_CONTROL_COMP_WAIT_INT_MASK,
   21.58                           IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
   21.59      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   21.60 @@ -391,20 +410,19 @@ static void parse_event_log_entry(u32 en
   21.61      u32 code;
   21.62      u64 *addr;
   21.63      char * event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
   21.64 -                                         "IO_PAGE_FALT",
   21.65 -                                         "DEV_TABLE_HW_ERROR",
   21.66 -                                         "PAGE_TABLE_HW_ERROR",
   21.67 -                                         "ILLEGAL_COMMAND_ERROR",
   21.68 -                                         "COMMAND_HW_ERROR",
   21.69 -                                         "IOTLB_INV_TIMEOUT",
   21.70 -                                         "INVALID_DEV_REQUEST"};
   21.71 +                          "IO_PAGE_FALT",
   21.72 +                          "DEV_TABLE_HW_ERROR",
   21.73 +                          "PAGE_TABLE_HW_ERROR",
   21.74 +                          "ILLEGAL_COMMAND_ERROR",
   21.75 +                          "COMMAND_HW_ERROR",
   21.76 +                          "IOTLB_INV_TIMEOUT",
   21.77 +                          "INVALID_DEV_REQUEST"};
   21.78  
   21.79 -    code = get_field_from_reg_u32(entry[1],
   21.80 -                                           IOMMU_EVENT_CODE_MASK,
   21.81 -                                           IOMMU_EVENT_CODE_SHIFT);
   21.82 +    code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK,
   21.83 +                                            IOMMU_EVENT_CODE_SHIFT);
   21.84  
   21.85 -    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST)
   21.86 -        || (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
   21.87 +    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
   21.88 +        (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
   21.89      {
   21.90          amd_iov_error("Invalid event log entry!\n");
   21.91          return;
   21.92 @@ -428,13 +446,20 @@ static void parse_event_log_entry(u32 en
   21.93  static void amd_iommu_page_fault(int vector, void *dev_id,
   21.94                               struct cpu_user_regs *regs)
   21.95  {
   21.96 -    u32  event[4];
   21.97 +    u32 event[4];
   21.98 +    u32 entry;
   21.99      unsigned long flags;
  21.100      int ret = 0;
  21.101      struct amd_iommu *iommu = dev_id;
  21.102  
  21.103      spin_lock_irqsave(&iommu->lock, flags);
  21.104      ret = amd_iommu_read_event_log(iommu, event);
  21.105 +    /* reset interrupt status bit */
  21.106 +    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
  21.107 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  21.108 +                         IOMMU_STATUS_EVENT_LOG_INT_MASK,
  21.109 +                         IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
  21.110 +    writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
  21.111      spin_unlock_irqrestore(&iommu->lock, flags);
  21.112  
  21.113      if ( ret != 0 )
  21.114 @@ -466,7 +491,7 @@ static int set_iommu_interrupt_handler(s
  21.115          amd_iov_error("can't request irq\n");
  21.116          return 0;
  21.117      }
  21.118 -
  21.119 +    iommu->vector = vector;
  21.120      return vector;
  21.121  }
  21.122  
    22.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Wed Dec 03 11:43:54 2008 +0900
    22.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Dec 04 11:01:53 2008 +0900
    22.3 @@ -580,3 +580,47 @@ out:
    22.4      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    22.5      return 0;
    22.6  }
    22.7 +
    22.8 +void invalidate_all_iommu_pages(struct domain *d)
    22.9 +{
   22.10 +    u32 cmd[4], entry;
   22.11 +    unsigned long flags;
   22.12 +    struct amd_iommu *iommu;
   22.13 +    int domain_id = d->domain_id;
   22.14 +    u64 addr_lo = 0x7FFFFFFFFFFFF000ULL & DMA_32BIT_MASK;
   22.15 +    u64 addr_hi = 0x7FFFFFFFFFFFF000ULL >> 32;
   22.16 +
   22.17 +    set_field_in_reg_u32(domain_id, 0,
   22.18 +                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
   22.19 +                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
   22.20 +    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
   22.21 +                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
   22.22 +                         &entry);
   22.23 +    cmd[1] = entry;
   22.24 +
   22.25 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
   22.26 +                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
   22.27 +                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
   22.28 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   22.29 +                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
   22.30 +                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
   22.31 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
   22.32 +                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
   22.33 +                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
   22.34 +    cmd[2] = entry;
   22.35 +
   22.36 +    set_field_in_reg_u32((u32)addr_hi, 0,
   22.37 +                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
   22.38 +                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
   22.39 +    cmd[3] = entry;
   22.40 +
   22.41 +    cmd[0] = 0;
   22.42 +
   22.43 +    for_each_amd_iommu ( iommu )
   22.44 +    {
   22.45 +        spin_lock_irqsave(&iommu->lock, flags);
   22.46 +        send_iommu_command(iommu, cmd);
   22.47 +        flush_command_buffer(iommu);
   22.48 +        spin_unlock_irqrestore(&iommu->lock, flags);
   22.49 +    }
   22.50 +}
    23.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Dec 03 11:43:54 2008 +0900
    23.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Dec 04 11:01:53 2008 +0900
    23.3 @@ -389,6 +389,7 @@ static void deallocate_iommu_page_tables
    23.4  static void amd_iommu_domain_destroy(struct domain *d)
    23.5  {
    23.6      deallocate_iommu_page_tables(d);
    23.7 +    invalidate_all_iommu_pages(d);
    23.8  }
    23.9  
   23.10  static int amd_iommu_return_device(
    24.1 --- a/xen/drivers/passthrough/vtd/dmar.c	Wed Dec 03 11:43:54 2008 +0900
    24.2 +++ b/xen/drivers/passthrough/vtd/dmar.c	Thu Dec 04 11:01:53 2008 +0900
    24.3 @@ -172,6 +172,28 @@ struct acpi_drhd_unit * acpi_find_matche
    24.4      return found ? found : include_all;
    24.5  }
    24.6  
    24.7 +struct acpi_atsr_unit * acpi_find_matched_atsr_unit(u8 bus, u8 devfn)
    24.8 +{
    24.9 +    struct acpi_atsr_unit *atsr;
   24.10 +    struct acpi_atsr_unit *found = NULL, *include_all = NULL;
   24.11 +    int i;
   24.12 +
   24.13 +    list_for_each_entry ( atsr, &acpi_atsr_units, list )
   24.14 +    {
   24.15 +        for (i = 0; i < atsr->scope.devices_cnt; i++)
   24.16 +            if ( atsr->scope.devices[i] == PCI_BDF2(bus, devfn) )
   24.17 +                return atsr;
   24.18 +
   24.19 +        if ( test_bit(bus, atsr->scope.buses) )
   24.20 +            found = atsr;
   24.21 +
   24.22 +        if ( atsr->all_ports )
   24.23 +            include_all = atsr;
   24.24 +    }
   24.25 +
   24.26 +    return found ? found : include_all;
   24.27 +}
   24.28 +
   24.29  /*
   24.30   * Count number of devices in device scope.  Do not include PCI sub
   24.31   * hierarchies.
   24.32 @@ -242,7 +264,6 @@ static int __init acpi_parse_dev_scope(v
   24.33          switch ( acpi_scope->dev_type )
   24.34          {
   24.35          case ACPI_DEV_P2PBRIDGE:
   24.36 -        {
   24.37              sec_bus = pci_conf_read8(
   24.38                  bus, path->dev, path->fn, PCI_SECONDARY_BUS);
   24.39              sub_bus = pci_conf_read8(
   24.40 @@ -253,7 +274,6 @@ static int __init acpi_parse_dev_scope(v
   24.41  
   24.42              dmar_scope_add_buses(scope, sec_bus, sub_bus);
   24.43              break;
   24.44 -        }
   24.45  
   24.46          case ACPI_DEV_MSI_HPET:
   24.47              dprintk(XENLOG_INFO VTDPREFIX, "found MSI HPET: bdf = %x:%x.%x\n",
   24.48 @@ -268,7 +288,6 @@ static int __init acpi_parse_dev_scope(v
   24.49              break;
   24.50  
   24.51          case ACPI_DEV_IOAPIC:
   24.52 -        {
   24.53              dprintk(XENLOG_INFO VTDPREFIX, "found IOAPIC: bdf = %x:%x.%x\n",
   24.54                      bus, path->dev, path->fn);
   24.55  
   24.56 @@ -288,7 +307,6 @@ static int __init acpi_parse_dev_scope(v
   24.57              scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
   24.58              break;
   24.59          }
   24.60 -        }
   24.61  
   24.62          start += acpi_scope->length;
   24.63     }
    25.1 --- a/xen/drivers/passthrough/vtd/dmar.h	Wed Dec 03 11:43:54 2008 +0900
    25.2 +++ b/xen/drivers/passthrough/vtd/dmar.h	Thu Dec 04 11:01:53 2008 +0900
    25.3 @@ -80,6 +80,7 @@ struct acpi_atsr_unit {
    25.4                   idx < rmrr->scope.devices_cnt; idx++)
    25.5  
    25.6  struct acpi_drhd_unit * acpi_find_matched_drhd_unit(u8 bus, u8 devfn);
    25.7 +struct acpi_atsr_unit * acpi_find_matched_atsr_unit(u8 bus, u8 devfn);
    25.8  void dmar_scope_add_buses(struct dmar_scope *scope, u16 sec, u16 sub);
    25.9  void dmar_scope_remove_buses(struct dmar_scope *scope, u16 sec, u16 sub);
   25.10  
    26.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Wed Dec 03 11:43:54 2008 +0900
    26.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Thu Dec 04 11:01:53 2008 +0900
    26.3 @@ -446,10 +446,6 @@ static int flush_iotlb_reg(void *_iommu,
    26.4      if ( DMA_TLB_IAIG(val) == 0 )
    26.5          dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: flush IOTLB failed\n");
    26.6  
    26.7 -    if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
    26.8 -        dprintk(XENLOG_INFO VTDPREFIX,
    26.9 -                "IOMMU: tlb flush request %x, actual %x\n",
   26.10 -               (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
   26.11      /* flush iotlb entry will implicitly flush write buffer */
   26.12      return 0;
   26.13  }
   26.14 @@ -714,22 +710,22 @@ static void iommu_fault_status(u32 fault
   26.15      if ( fault_status & DMA_FSTS_PFO )
   26.16          dprintk(XENLOG_ERR VTDPREFIX,
   26.17              "iommu_fault_status: Fault Overflow\n");
   26.18 -    else if ( fault_status & DMA_FSTS_PPF )
   26.19 +    if ( fault_status & DMA_FSTS_PPF )
   26.20          dprintk(XENLOG_ERR VTDPREFIX,
   26.21              "iommu_fault_status: Primary Pending Fault\n");
   26.22 -    else if ( fault_status & DMA_FSTS_AFO )
   26.23 +    if ( fault_status & DMA_FSTS_AFO )
   26.24          dprintk(XENLOG_ERR VTDPREFIX,
   26.25              "iommu_fault_status: Advanced Fault Overflow\n");
   26.26 -    else if ( fault_status & DMA_FSTS_APF )
   26.27 +    if ( fault_status & DMA_FSTS_APF )
   26.28          dprintk(XENLOG_ERR VTDPREFIX,
   26.29              "iommu_fault_status: Advanced Pending Fault\n");
   26.30 -    else if ( fault_status & DMA_FSTS_IQE )
   26.31 +    if ( fault_status & DMA_FSTS_IQE )
   26.32          dprintk(XENLOG_ERR VTDPREFIX,
   26.33              "iommu_fault_status: Invalidation Queue Error\n");
   26.34 -    else if ( fault_status & DMA_FSTS_ICE )
   26.35 +    if ( fault_status & DMA_FSTS_ICE )
   26.36          dprintk(XENLOG_ERR VTDPREFIX,
   26.37              "iommu_fault_status: Invalidation Completion Error\n");
   26.38 -    else if ( fault_status & DMA_FSTS_ITE )
   26.39 +    if ( fault_status & DMA_FSTS_ITE )
   26.40          dprintk(XENLOG_ERR VTDPREFIX,
   26.41              "iommu_fault_status: Invalidation Time-out Error\n");
   26.42  }
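
[Editor's note] Turning the else-if chain into independent ifs matters because the fault status bits report separate conditions that can be set simultaneously; the chain logged only the first match and hid the rest. A small illustration (the bit positions here are made up, not taken from the VT-d spec):

    #include <stdio.h>

    #define FSTS_PFO (1u << 0)   /* fault overflow, illustrative    */
    #define FSTS_IQE (1u << 4)   /* invalidation queue error, ditto */

    static void report(unsigned int fsts)
    {
        /* Independent ifs report every condition that is set;
         * an else-if chain would stop after the first hit. */
        if (fsts & FSTS_PFO) puts("Fault Overflow");
        if (fsts & FSTS_IQE) puts("Invalidation Queue Error");
    }

    int main(void)
    {
        report(FSTS_PFO | FSTS_IQE);   /* both lines print */
        return 0;
    }
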
   26.43 @@ -754,10 +750,11 @@ static void iommu_page_fault(int vector,
   26.44  
   26.45      /* FIXME: ignore advanced fault log */
   26.46      if ( !(fault_status & DMA_FSTS_PPF) )
   26.47 -        return;
   26.48 +        goto clear_overflow;
   26.49 +
   26.50      fault_index = dma_fsts_fault_record_index(fault_status);
   26.51      reg = cap_fault_reg_offset(iommu->cap);
   26.52 -    for ( ; ; )
   26.53 +    while (1)
   26.54      {
   26.55          u8 fault_reason;
   26.56          u16 source_id;
   26.57 @@ -797,8 +794,9 @@ static void iommu_page_fault(int vector,
   26.58          if ( fault_index > cap_num_fault_regs(iommu->cap) )
   26.59              fault_index = 0;
   26.60      }
   26.61 -
   26.62 +clear_overflow:
   26.63      /* clear primary fault overflow */
   26.64 +    fault_status = readl(iommu->reg + DMAR_FSTS_REG);
   26.65      if ( fault_status & DMA_FSTS_PFO )
   26.66      {
   26.67          spin_lock_irqsave(&iommu->register_lock, flags);
   26.68 @@ -1125,10 +1123,11 @@ static int domain_context_mapping_one(
   26.69      unmap_vtd_domain_page(context_entries);
   26.70  
   26.71      /* Context entry was previously non-present (with domid 0). */
   26.72 -    iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
   26.73 -                               DMA_CCMD_MASK_NOBIT, 1);
   26.74 -    if ( iommu_flush_iotlb_dsi(iommu, 0, 1) )
   26.75 +    if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
   26.76 +                                    DMA_CCMD_MASK_NOBIT, 1) )
   26.77          iommu_flush_write_buffer(iommu);
   26.78 +    else
   26.79 +        iommu_flush_iotlb_dsi(iommu, 0, 1);
   26.80  
   26.81      set_bit(iommu->index, &hd->iommu_bitmap);
   26.82      spin_unlock_irqrestore(&iommu->lock, flags);
   26.83 @@ -1308,8 +1307,12 @@ static int domain_context_unmap_one(
   26.84      context_clear_present(*context);
   26.85      context_clear_entry(*context);
   26.86      iommu_flush_cache_entry(context);
   26.87 -    iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0);
   26.88 -    iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
   26.89 +
   26.90 +    if ( iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0) )
   26.91 +        iommu_flush_write_buffer(iommu);
   26.92 +    else
   26.93 +        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
   26.94 +
   26.95      unmap_vtd_domain_page(context_entries);
   26.96      spin_unlock_irqrestore(&iommu->lock, flags);
   26.97  
    27.1 --- a/xen/drivers/passthrough/vtd/iommu.h	Wed Dec 03 11:43:54 2008 +0900
    27.2 +++ b/xen/drivers/passthrough/vtd/iommu.h	Thu Dec 04 11:01:53 2008 +0900
    27.3 @@ -310,6 +310,10 @@ struct iremap_entry {
    27.4  struct qinval_entry {
    27.5      union {
    27.6          struct {
    27.7 +            u64 lo;
    27.8 +            u64 hi;
    27.9 +        }val;
   27.10 +        struct {
   27.11              struct {
   27.12                  u64 type    : 4,
   27.13                      granu   : 2,
    28.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Wed Dec 03 11:43:54 2008 +0900
    28.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Thu Dec 04 11:01:53 2008 +0900
    28.3 @@ -34,13 +34,13 @@ static void print_qi_regs(struct iommu *
    28.4      u64 val;
    28.5  
    28.6      val = dmar_readq(iommu->reg, DMAR_IQA_REG);
    28.7 -    printk("DMAR_IAQ_REG = %"PRIx64"\n", val);
    28.8 +    printk("DMAR_IQA_REG = %"PRIx64"\n", val);
    28.9  
   28.10      val = dmar_readq(iommu->reg, DMAR_IQH_REG);
   28.11 -    printk("DMAR_IAH_REG = %"PRIx64"\n", val);
   28.12 +    printk("DMAR_IQH_REG = %"PRIx64"\n", val);
   28.13  
   28.14      val = dmar_readq(iommu->reg, DMAR_IQT_REG);
   28.15 -    printk("DMAR_IAT_REG = %"PRIx64"\n", val);
   28.16 +    printk("DMAR_IQT_REG = %"PRIx64"\n", val);
   28.17  }
   28.18  
   28.19  static int qinval_next_index(struct iommu *iommu)
   28.20 @@ -252,14 +252,15 @@ static int gen_dev_iotlb_inv_dsc(struct 
   28.21      qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;
   28.22  
   28.23      qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
   28.24 -    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr;
   28.25 +    qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
   28.26 +    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
   28.27  
   28.28      unmap_vtd_domain_page(qinval_entries);
   28.29      spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
   28.30      return 0;
   28.31  }
   28.32  
   28.33 -int queue_invalidate_device_iotlb(struct iommu *iommu,
   28.34 +int qinval_device_iotlb(struct iommu *iommu,
   28.35      u32 max_invs_pend, u16 sid, u16 size, u64 addr)
   28.36  {
   28.37      int ret = -1;
    29.1 --- a/xen/include/asm-x86/domain.h	Wed Dec 03 11:43:54 2008 +0900
    29.2 +++ b/xen/include/asm-x86/domain.h	Thu Dec 04 11:01:53 2008 +0900
    29.3 @@ -238,6 +238,10 @@ struct arch_domain
    29.4      int vector_pirq[NR_VECTORS];
    29.5      s16 pirq_vector[NR_IRQS];
    29.6  
    29.7 +    /* Shared page for notifying that explicit PIRQ EOI is required. */
    29.8 +    unsigned long *pirq_eoi_map;
    29.9 +    unsigned long pirq_eoi_map_mfn;
   29.10 +
   29.11      /* Pseudophysical e820 map (XENMEM_memory_map).  */
   29.12      struct e820entry e820[3];
   29.13      unsigned int nr_e820;
    30.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Dec 03 11:43:54 2008 +0900
    30.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Dec 04 11:01:53 2008 +0900
    30.3 @@ -63,6 +63,7 @@ void *amd_iommu_get_vptr_from_page_table
    30.4  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
    30.5          unsigned long phys_addr, unsigned long size, int iw, int ir);
    30.6  int amd_iommu_sync_p2m(struct domain *d);
    30.7 +void invalidate_all_iommu_pages(struct domain *d);
    30.8  
    30.9  /* device table functions */
   30.10  void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
    31.1 --- a/xen/include/asm-x86/page.h	Wed Dec 03 11:43:54 2008 +0900
    31.2 +++ b/xen/include/asm-x86/page.h	Thu Dec 04 11:01:53 2008 +0900
    31.3 @@ -228,7 +228,7 @@ void copy_page_sse2(void *, const void *
    31.4  
    31.5  /* Convert between Xen-heap virtual addresses and machine frame numbers. */
    31.6  #define virt_to_mfn(va)     (virt_to_maddr(va) >> PAGE_SHIFT)
    31.7 -#define mfn_to_virt(mfn)    (maddr_to_virt(mfn << PAGE_SHIFT))
    31.8 +#define mfn_to_virt(mfn)    (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
    31.9  
   31.10  /* Convert between machine frame numbers and page-info structures. */
   31.11  #define mfn_to_page(mfn)    (frame_table + (mfn))
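
[Editor's note] The added (paddr_t) cast fixes a shift-width bug: without it, mfn << PAGE_SHIFT is evaluated at the width of mfn's type, so on a 32-bit build any frame number of 1<<20 or above (machine addresses at or past 4GiB) silently loses its high bits before maddr_to_virt() sees them. A small demonstration that forces 32-bit arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef uint64_t paddr_t;

    int main(void)
    {
        uint32_t mfn = 0x200000;   /* frame number of the page at 8GiB */

        paddr_t wrong = mfn << PAGE_SHIFT;           /* 32-bit shift wraps to 0 */
        paddr_t right = (paddr_t)mfn << PAGE_SHIFT;  /* 0x200000000, as intended */

        printf("wrong=%#llx right=%#llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
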
    32.1 --- a/xen/include/public/physdev.h	Wed Dec 03 11:43:54 2008 +0900
    32.2 +++ b/xen/include/public/physdev.h	Thu Dec 04 11:01:53 2008 +0900
    32.3 @@ -41,6 +41,21 @@ typedef struct physdev_eoi physdev_eoi_t
    32.4  DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
    32.5  
    32.6  /*
    32.7 + * Register a shared page for the hypervisor to indicate whether the guest
    32.8 + * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
    32.9 + * once the guest used this function in that the associated event channel
   32.10 + * will automatically get unmasked. The page registered is used as a bit
   32.11 + * array indexed by Xen's PIRQ value.
   32.12 + */
   32.13 +#define PHYSDEVOP_pirq_eoi_gmfn         17
   32.14 +struct physdev_pirq_eoi_gmfn {
   32.15 +    /* IN */
   32.16 +    xen_pfn_t gmfn;
   32.17 +};
   32.18 +typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
   32.19 +DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
   32.20 +
   32.21 +/*
   32.22   * Query the status of an IRQ line.
   32.23   * @arg == pointer to physdev_irq_status_query structure.
   32.24   */
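
[Editor's note] A sketch of how a guest might consume this interface. Only PHYSDEVOP_pirq_eoi_gmfn, PHYSDEVOP_eoi and their structs come from this header; HYPERVISOR_physdev_op(), virt_to_gmfn() and test_bit() stand in for whatever hypercall wrapper, address translation and bit helpers the guest OS actually provides, so this is not a drop-in implementation:

    /* Assumed guest-OS helpers (names vary by OS):
     *   HYPERVISOR_physdev_op(), virt_to_gmfn(), test_bit() */
    #define PAGE_SIZE 4096

    static unsigned long pirq_eoi_map[PAGE_SIZE / sizeof(unsigned long)]
        __attribute__((__aligned__(PAGE_SIZE)));
    static int pirq_eoi_map_registered;

    static void pirq_eoi_map_init(void)
    {
        struct physdev_pirq_eoi_gmfn info = {
            .gmfn = virt_to_gmfn(pirq_eoi_map),
        };
        /* Older hypervisors reject the op; then fall back to issuing
         * PHYSDEVOP_eoi unconditionally, as before this change. */
        pirq_eoi_map_registered =
            (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &info) == 0);
    }

    static void pirq_done(int pirq)
    {
        struct physdev_eoi eoi = { .irq = pirq };
        /* Xen sets the PIRQ's bit in the shared page whenever an
         * explicit EOI is required; skip the hypercall otherwise.
         * (Once registered, PHYSDEVOP_eoi also unmasks the event
         * channel, per the comment above.) */
        if (!pirq_eoi_map_registered || test_bit(pirq, pirq_eoi_map))
            HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
    }
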
    33.1 --- a/xen/include/xen/event.h	Wed Dec 03 11:43:54 2008 +0900
    33.2 +++ b/xen/include/xen/event.h	Thu Dec 04 11:01:53 2008 +0900
    33.3 @@ -44,6 +44,9 @@ int evtchn_send(struct domain *d, unsign
    33.4  /* Bind a local event-channel port to the specified VCPU. */
    33.5  long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
    33.6  
    33.7 +/* Unmask a local event-channel port. */
    33.8 +int evtchn_unmask(unsigned int port);
    33.9 +
   33.10  /* Allocate/free a Xen-attached event channel port. */
   33.11  int alloc_unbound_xen_event_channel(
   33.12      struct vcpu *local_vcpu, domid_t remote_domid);
    34.1 --- a/xen/include/xen/irq.h	Wed Dec 03 11:43:54 2008 +0900
    34.2 +++ b/xen/include/xen/irq.h	Thu Dec 04 11:01:53 2008 +0900
    34.3 @@ -22,6 +22,7 @@ struct irqaction
    34.4  #define IRQ_PENDING	4	/* IRQ pending - replay on enable */
    34.5  #define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
    34.6  #define IRQ_GUEST       16      /* IRQ is handled by guest OS(es) */
    34.7 +#define IRQ_GUEST_EOI_PENDING 32 /* IRQ was disabled, pending a guest EOI */
    34.8  #define IRQ_PER_CPU     256     /* IRQ is per CPU */
    34.9  
   34.10  /*
    35.1 --- a/xen/tools/symbols.c	Wed Dec 03 11:43:54 2008 +0900
    35.2 +++ b/xen/tools/symbols.c	Thu Dec 04 11:01:53 2008 +0900
    35.3 @@ -81,7 +81,8 @@ static int read_symbol(FILE *in, struct 
    35.4  	if (rc != 3) {
    35.5  		if (rc != EOF) {
    35.6  			/* skip line */
    35.7 -			fgets(str, 500, in);
    35.8 +			if (fgets(str, 500, in) == NULL)
    35.9 +				return -1; /* must check fgets result */
   35.10  		}
   35.11  		return -1;
   35.12  	}