ia64/xen-unstable

changeset 7845:5019c8d034c4

Merged.
author emellor@leeni.uk.xensource.com
date Wed Nov 16 11:32:14 2005 +0100 (2005-11-16)
parents fa76c455af35 9bf6f907b3ff
children 592d8f74d23d dee321bf18e9
files tools/python/xen/xend/XendDomain.py
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Wed Nov 16 11:31:57 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Wed Nov 16 11:32:14 2005 +0100
     1.3 @@ -752,7 +752,7 @@ void __init init_IRQ(void)
     1.4  		irq_bindcount[dynirq_to_irq(i)] = 0;
     1.5  
     1.6  		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
     1.7 -		irq_desc[dynirq_to_irq(i)].action  = 0;
     1.8 +		irq_desc[dynirq_to_irq(i)].action  = NULL;
     1.9  		irq_desc[dynirq_to_irq(i)].depth   = 1;
    1.10  		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    1.11  	}
    1.12 @@ -770,7 +770,7 @@ void __init init_IRQ(void)
    1.13  #endif
    1.14  
    1.15  		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
    1.16 -		irq_desc[pirq_to_irq(i)].action  = 0;
    1.17 +		irq_desc[pirq_to_irq(i)].action  = NULL;
    1.18  		irq_desc[pirq_to_irq(i)].depth   = 1;
    1.19  		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    1.20  	}
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Wed Nov 16 11:31:57 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Wed Nov 16 11:32:14 2005 +0100
     2.3 @@ -188,7 +188,7 @@ static int __do_suspend(void *ignore)
     2.4  	xenbus_resume();
     2.5  
     2.6  #ifdef CONFIG_SMP
     2.7 -	for_each_present_cpu(i)
     2.8 +	for_each_cpu(i)
     2.9  		vcpu_prepare(i);
    2.10  
    2.11   out_reenable_cpus:
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Wed Nov 16 11:31:57 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Wed Nov 16 11:32:14 2005 +0100
     3.3 @@ -277,6 +277,18 @@ void __devinit smp_prepare_boot_cpu(void
     3.4  
     3.5  #ifdef CONFIG_HOTPLUG_CPU
     3.6  
     3.7 +/*
     3.8 + * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
     3.9 + * But do it early enough to catch critical for_each_present_cpu() loops
    3.10 + * in i386-specific code.
    3.11 + */
    3.12 +static int __init initialize_cpu_present_map(void)
    3.13 +{
    3.14 +	cpu_present_map = cpu_possible_map;
    3.15 +	return 0;
    3.16 +}
    3.17 +core_initcall(initialize_cpu_present_map);
    3.18 +
    3.19  static void vcpu_hotplug(unsigned int cpu)
    3.20  {
    3.21  	int err;
    3.22 @@ -293,7 +305,6 @@ static void vcpu_hotplug(unsigned int cp
    3.23  	}
    3.24  
    3.25  	if (strcmp(state, "online") == 0) {
    3.26 -		cpu_set(cpu, cpu_present_map);
    3.27  		(void)cpu_up(cpu);
    3.28  	} else if (strcmp(state, "offline") == 0) {
    3.29  		(void)cpu_down(cpu);
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Wed Nov 16 11:31:57 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Wed Nov 16 11:32:14 2005 +0100
     4.3 @@ -1,6 +1,7 @@
     4.4  
     4.5  #include <linux/config.h>
     4.6  #include <linux/proc_fs.h>
     4.7 +#include <asm-xen/xen_proc.h>
     4.8  
     4.9  static struct proc_dir_entry *xen_base;
    4.10  
     5.1 --- a/linux-2.6-xen-sparse/drivers/acpi/tables.c	Wed Nov 16 11:31:57 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/drivers/acpi/tables.c	Wed Nov 16 11:32:14 2005 +0100
     5.3 @@ -565,7 +565,7 @@ acpi_table_get_sdt (
     5.4   * 
     5.5   * result: sdt_entry[] is initialized
     5.6   */
     5.7 -#ifdef CONFIG_XEN_X86
     5.8 +#if defined(CONFIG_XEN_X86) || defined(CONFIG_XEN_X86_64)
     5.9  #define acpi_rsdp_phys_to_va(rsdp_phys) (__fix_to_virt(FIX_ACPI_RSDP_PAGE) + \
    5.10  					   (rsdp_phys & ~PAGE_MASK))
    5.11  #else
     6.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Wed Nov 16 11:31:57 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Wed Nov 16 11:32:14 2005 +0100
     6.3 @@ -438,7 +438,7 @@ static struct attribute* xen_attrs[] = {
     6.4  	&dev_attr_pcrs.attr,
     6.5  	&dev_attr_caps.attr,
     6.6  	&dev_attr_cancel.attr,
     6.7 -	0,
     6.8 +	NULL,
     6.9  };
    6.10  
    6.11  static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Wed Nov 16 11:31:57 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Wed Nov 16 11:32:14 2005 +0100
     7.3 @@ -543,7 +543,7 @@ static int __init blkif_init(void)
     7.4  	spin_lock_init(&blkio_schedule_list_lock);
     7.5  	INIT_LIST_HEAD(&blkio_schedule_list);
     7.6  
     7.7 -	ret = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
     7.8 +	ret = kernel_thread(blkio_schedule, NULL, CLONE_FS | CLONE_FILES);
     7.9  	BUG_ON(ret < 0);
    7.10  
    7.11  	blkif_xenbus_init();
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Wed Nov 16 11:31:57 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Wed Nov 16 11:32:14 2005 +0100
     8.3 @@ -854,7 +854,7 @@ int register_xenstore_notifier(struct no
     8.4  {
     8.5  	int ret = 0;
     8.6  
     8.7 -        if(xenstored_ready > 0) 
     8.8 +	if (xenstored_ready > 0) 
     8.9  		ret = nb->notifier_call(nb, 0, NULL);
    8.10  	else 
    8.11  		notifier_chain_register(&xenstore_chain, nb);
    8.12 @@ -884,7 +884,7 @@ void xenbus_probe(void *unused)
    8.13  	register_xenbus_watch(&be_watch);
    8.14  
    8.15  	/* Notify others that xenstore is up */
    8.16 -	notifier_call_chain(&xenstore_chain, 0, 0);
    8.17 +	notifier_call_chain(&xenstore_chain, 0, NULL);
    8.18  }
    8.19  
    8.20  
     9.1 --- a/tools/python/xen/xend/XendDomain.py	Wed Nov 16 11:31:57 2005 +0100
     9.2 +++ b/tools/python/xen/xend/XendDomain.py	Wed Nov 16 11:32:14 2005 +0100
     9.3 @@ -36,6 +36,7 @@ from xen.xend import XendCheckpoint
     9.4  from xen.xend.XendError import XendError
     9.5  from xen.xend.XendLogging import log
     9.6  from xen.xend.server import relocate
     9.7 +from xen.xend.xenstore.xswatch import xswatch
     9.8  
     9.9  
    9.10  xc = xen.lowlevel.xc.new()
    9.11 @@ -58,9 +59,11 @@ class XendDomain:
    9.12          # to import XendDomain from XendDomainInfo causes unbounded recursion.
    9.13          # So we stuff the XendDomain instance (self) into xroot's components.
    9.14          xroot.add_component("xen.xend.XendDomain", self)
    9.15 +
    9.16          self.domains = {}
    9.17          self.domains_lock = threading.RLock()
    9.18 -        self.watchReleaseDomain()
    9.19 +
    9.20 +        xswatch("@releaseDomain", self.onReleaseDomain)
    9.21  
    9.22          self.domains_lock.acquire()
    9.23          try:
    9.24 @@ -112,11 +115,7 @@ class XendDomain:
    9.25              self.refresh()
    9.26          finally:
    9.27              self.domains_lock.release()
    9.28 -            
    9.29 -
    9.30 -    def watchReleaseDomain(self):
    9.31 -        from xen.xend.xenstore.xswatch import xswatch
    9.32 -        self.releaseDomain = xswatch("@releaseDomain", self.onReleaseDomain)
    9.33 +        return 1
    9.34  
    9.35  
    9.36      def xen_domains(self):
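
The XendDomain.py change above registers the "@releaseDomain" watch directly in the constructor via xswatch("@releaseDomain", self.onReleaseDomain) and has the handler return 1. Under the reworked xswatch (last file in this changeset) a watch whose callback returns a falsy value is unregistered, so the explicit "return 1" is what keeps the watch alive across events. A minimal, self-contained sketch of that return-value contract, using generic names rather than the Xend API:

    # Toy dispatcher mimicking the xswatch contract: a callback stays
    # registered while it returns a true value and is dropped once it
    # returns a falsy one.
    class ToyWatcher:
        def __init__(self):
            self.watches = {}

        def watch(self, path, fn):
            self.watches[path] = fn

        def fire(self, path):
            fn = self.watches.get(path)
            if fn is not None and not fn():
                del self.watches[path]      # falsy return: unwatch, as watchMain does

    seen = []

    def on_release_domain():                # stands in for XendDomain.onReleaseDomain
        seen.append("release")
        return 1                            # keep the watch registered

    w = ToyWatcher()
    w.watch("@releaseDomain", on_release_domain)
    w.fire("@releaseDomain")
    w.fire("@releaseDomain")
    assert len(seen) == 2 and "@releaseDomain" in w.watches
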
    10.1 --- a/tools/python/xen/xend/server/DevController.py	Wed Nov 16 11:31:57 2005 +0100
    10.2 +++ b/tools/python/xen/xend/server/DevController.py	Wed Nov 16 11:32:14 2005 +0100
    10.3 @@ -29,6 +29,12 @@ DEVICE_CREATE_TIMEOUT = 5
    10.4  HOTPLUG_STATUS_NODE = "hotplug-status"
    10.5  HOTPLUG_STATUS_ERROR = "error"
    10.6  
    10.7 +Connected = 1
    10.8 +Died      = 2
    10.9 +Error     = 3
   10.10 +Missing   = 4
   10.11 +Timeout   = 5
   10.12 +
   10.13  xenbusState = {
   10.14      'Unknown'      : 0,
   10.15      'Initialising' : 1,
   10.16 @@ -87,18 +93,28 @@ class DevController:
   10.17      def waitForDevice(self, devid):
   10.18          log.debug("Waiting for %s.", devid)
   10.19          
   10.20 -        status, fn_ret = self.waitForBackend(devid)
   10.21 -        if status:
   10.22 +        status = self.waitForBackend(devid)
   10.23 +
   10.24 +        if status == Timeout:
   10.25 +            self.destroyDevice(devid)
   10.26 +            raise VmError("Device %s (%s) could not be connected. "
   10.27 +                          "Hotplug scripts not working" %
   10.28 +                          (devid, self.deviceClass))
   10.29 +
   10.30 +        elif status == Error:
   10.31              self.destroyDevice(devid)
   10.32 -            raise VmError( ("Device %s (%s) could not be connected. "
   10.33 -                            "Hotplug scripts not working") 
   10.34 -                            % (devid, self.deviceClass))
   10.35 +            raise VmError("Device %s (%s) could not be connected. "
   10.36 +                          "Backend device not found" %
   10.37 +                          (devid, self.deviceClass))
   10.38  
   10.39 -        elif fn_ret == HOTPLUG_STATUS_ERROR:
   10.40 +        elif status == Missing:
   10.41 +            raise VmError("Device %s (%s) could not be connected. "
   10.42 +                          "Device not found" % (devid, self.deviceClass))
   10.43 +
   10.44 +        elif status == Died:
   10.45              self.destroyDevice(devid)
   10.46 -            raise VmError( ("Device %s (%s) could not be connected. "
   10.47 -                            "Backend device not found!") 
   10.48 -                            % (devid, self.deviceClass))
   10.49 +            raise VmError("Device %s (%s) could not be connected. "
   10.50 +                          "Device has died" % (devid, self.deviceClass))
   10.51  
   10.52  
   10.53      def reconfigureDevice(self, devid, config):
   10.54 @@ -302,35 +318,22 @@ class DevController:
   10.55                  raise
   10.56  
   10.57  
   10.58 -    def waitForBackend(self,devid):
   10.59 -        ev = Event()
   10.60 +    def waitForBackend(self, devid):
   10.61  
   10.62 -        def hotplugStatus():
   10.63 -            log.debug("hotplugStatus %d", devid)
   10.64 -            
   10.65 -            try:
   10.66 -                status = self.readBackend(devid, HOTPLUG_STATUS_NODE)
   10.67 -            except VmError:
   10.68 -                status = "died"
   10.69 -            if status is not None:
   10.70 -                watch.xs.unwatch(backpath, watch)
   10.71 -                hotplugStatus.value = status
   10.72 -                ev.set()
   10.73 -
   10.74 -        hotplugStatus.value = None
   10.75          frontpath = self.frontendPath(devid)
   10.76          backpath = xstransact.Read(frontpath, "backend")
   10.77  
   10.78          if backpath:
   10.79 -            watch = xswatch(backpath, hotplugStatus)
   10.80 +            statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
   10.81 +            ev = Event()
   10.82 +            result = { 'status': Timeout }
   10.83 +            
   10.84 +            xswatch(statusPath, hotplugStatusCallback, statusPath, ev, result)
   10.85  
   10.86              ev.wait(DEVICE_CREATE_TIMEOUT)
   10.87 -            if ev.isSet():
   10.88 -                return (0, hotplugStatus.value)
   10.89 -            else:
   10.90 -                return (-1, hotplugStatus.value)
   10.91 +            return result['status']
   10.92          else:
   10.93 -            return (-1, "missing")
   10.94 +            return Missing
   10.95  
   10.96  
   10.97      def backendPath(self, backdom, devid):
   10.98 @@ -352,3 +355,25 @@ class DevController:
   10.99      def frontendMiscPath(self):
  10.100          return "%s/device-misc/%s" % (self.vm.getDomainPath(),
  10.101                                        self.deviceClass)
  10.102 +
  10.103 +
  10.104 +def hotplugStatusCallback(statusPath, ev, result):
  10.105 +    log.debug("hotplugStatusCallback %s.", statusPath)
  10.106 +
  10.107 +    try:
  10.108 +        status = xstransact.Read(statusPath)
  10.109 +
  10.110 +        if status is not None:
  10.111 +            if status == HOTPLUG_STATUS_ERROR:
  10.112 +                result['status'] = Error
  10.113 +            else:
  10.114 +                result['status'] = Connected
  10.115 +        else:
  10.116 +            return 1
  10.117 +    except VmError:
  10.118 +        result['status'] = Died
  10.119 +
  10.120 +    log.debug("hotplugStatusCallback %d.", result['status'])
  10.121 +
  10.122 +    ev.set()
  10.123 +    return 0
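
The DevController.py rewrite replaces the (status, value) pair returned by waitForBackend() with a single status drawn from the new Connected/Died/Error/Missing/Timeout constants: the module-level hotplugStatusCallback() fills a shared result dict and sets a threading.Event, while the caller blocks on ev.wait(DEVICE_CREATE_TIMEOUT) and falls back to Timeout if nothing fires. A self-contained sketch of that wait-with-timeout pattern, with generic names standing in for the xenstore watch machinery:

    import threading

    CONNECTED, ERROR, TIMEOUT = 1, 3, 5         # mirrors the new module constants

    def wait_for_status(arm_watch, timeout=5.0):
        # 'arm_watch' is a hypothetical hook that registers the callback on the
        # hotplug-status node; the real code uses xswatch(statusPath, ...).
        ev = threading.Event()
        result = {'status': TIMEOUT}            # default survives if nothing fires

        def callback(status):
            result['status'] = ERROR if status == 'error' else CONNECTED
            ev.set()                            # wake the waiting caller
            return 0                            # falsy: the watch is dropped

        arm_watch(callback)
        ev.wait(timeout)
        return result['status']

    # Simulate the backend writing "connected" shortly after the wait begins.
    def arm(cb):
        threading.Timer(0.1, cb, args=('connected',)).start()

    assert wait_for_status(arm) == CONNECTED
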
    11.1 --- a/tools/python/xen/xend/xenstore/xstransact.py	Wed Nov 16 11:31:57 2005 +0100
    11.2 +++ b/tools/python/xen/xend/xenstore/xstransact.py	Wed Nov 16 11:32:14 2005 +0100
    11.3 @@ -177,18 +177,15 @@ class xstransact:
    11.4                  (key, fn, defval) = tup
    11.5  
    11.6              val = self._read(key)
    11.7 -            # If fn is str, then this will successfully convert None to
    11.8 -            # 'None'.  If it is int, then it will throw TypeError on None, or
    11.9 -            # on any other non-integer value.  We have to, therefore, both
   11.10 -            # check explicitly for None, and catch TypeError.  Either failure
   11.11 -            # will result in defval being used instead.
   11.12 +            # If fn is str, then this will successfully convert None to 'None'
   11.13 +            # (which we don't want).  If it is int or float, then it will
   11.14 +            # throw ValueError on any non-convertible value.  We check
   11.15 +            # explicitly for None, using defval instead, but allow ValueError
   11.16 +            # to propagate.
   11.17              if val is None:
   11.18                  val = defval
   11.19              else:
   11.20 -                try:
   11.21 -                    val = fn(val)
   11.22 -                except TypeError:
   11.23 -                    val = defval
   11.24 +                val = fn(val)
   11.25              ret.append(val)
   11.26          if len(ret) == 1:
   11.27              return ret[0]
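
The xstransact.py hunk narrows the fallback behaviour: a missing key (None) still yields the supplied default, but a value the conversion function cannot parse now raises ValueError instead of being silently replaced by the default. A small stand-alone sketch of the resulting semantics, using a plain dict in place of the xenstore transaction:

    def read_with_default(store, key, fn, defval):
        # Mimics the patched behaviour: the default applies only to a missing
        # key; conversion errors propagate to the caller.
        val = store.get(key)                    # stand-in for xstransact._read()
        if val is None:
            return defval
        return fn(val)                          # may raise ValueError, no longer swallowed

    store = {'vcpus': '4', 'memory': 'lots'}
    assert read_with_default(store, 'vcpus', int, 1) == 4
    assert read_with_default(store, 'missing', int, 1) == 1
    try:
        read_with_default(store, 'memory', int, 1)
    except ValueError:
        pass                                    # a malformed value now surfaces as an error
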
    12.1 --- a/tools/python/xen/xend/xenstore/xswatch.py	Wed Nov 16 11:31:57 2005 +0100
    12.2 +++ b/tools/python/xen/xend/xenstore/xswatch.py	Wed Nov 16 11:32:14 2005 +0100
    12.3 @@ -5,9 +5,7 @@
    12.4  # Public License.  See the file "COPYING" in the main directory of
    12.5  # this archive for more details.
    12.6  
    12.7 -import select
    12.8  import threading
    12.9 -from xen.lowlevel import xs
   12.10  from xen.xend.xenstore.xsutil import xshandle
   12.11  
   12.12  from xen.xend.XendLogging import log
   12.13 @@ -15,42 +13,45 @@ from xen.xend.XendLogging import log
   12.14  
   12.15  class xswatch:
   12.16  
   12.17 -    watchThread = None
   12.18 -    xs = None
   12.19 -    xslock = threading.Lock()
   12.20 -    
   12.21      def __init__(self, path, fn, *args, **kwargs):
   12.22 +        self.path = path
   12.23          self.fn = fn
   12.24          self.args = args
   12.25          self.kwargs = kwargs
   12.26 -        xswatch.watchStart()
   12.27 -        xswatch.xs.watch(path, self)
   12.28 -
   12.29 -    def watchStart(cls):
   12.30 -        cls.xslock.acquire()
   12.31 -        if cls.watchThread:
   12.32 -            cls.xslock.release()
   12.33 -            return
   12.34 -        cls.xs = xshandle()
   12.35 -        cls.watchThread = threading.Thread(name="Watcher",
   12.36 -                                           target=cls.watchMain)
   12.37 -        cls.watchThread.setDaemon(True)
   12.38 -        cls.watchThread.start()
   12.39 -        cls.xslock.release()
   12.40 -
   12.41 -    watchStart = classmethod(watchStart)
   12.42 -
   12.43 -    def watchMain(cls):
   12.44 -        while True:
   12.45 -            try:
   12.46 -                we = cls.xs.read_watch()
   12.47 -                watch = we[1]
   12.48 -                watch.fn(*watch.args, **watch.kwargs)
   12.49 -            except:
   12.50 -                log.exception("read_watch failed")
   12.51 -                # Ignore this exception -- there's no point throwing it
   12.52 -                # further on because that will just kill the watcher thread,
   12.53 -                # which achieves nothing.
   12.54 +        watchStart()
   12.55 +        xs.watch(path, self)
   12.56  
   12.57  
   12.58 -    watchMain = classmethod(watchMain)
   12.59 +watchThread = None
   12.60 +xs = None
   12.61 +xslock = threading.Lock()
   12.62 +
   12.63 +def watchStart():
   12.64 +    global watchThread
   12.65 +    global xs
   12.66 +    
   12.67 +    xslock.acquire()
   12.68 +    try:
   12.69 +        if watchThread:
   12.70 +            return
   12.71 +        xs = xshandle()
   12.72 +        watchThread = threading.Thread(name="Watcher", target=watchMain)
   12.73 +        watchThread.setDaemon(True)
   12.74 +        watchThread.start()
   12.75 +    finally:
   12.76 +        xslock.release()
   12.77 +
   12.78 +
   12.79 +def watchMain():
   12.80 +    while True:
   12.81 +        try:
   12.82 +            we = xs.read_watch()
   12.83 +            watch = we[1]
   12.84 +            res = watch.fn(*watch.args, **watch.kwargs)
   12.85 +            if not res:
   12.86 +                xs.unwatch(watch.path, watch)
   12.87 +        except:
   12.88 +            log.exception("read_watch failed")
   12.89 +            # Ignore this exception -- there's no point throwing it
   12.90 +            # further on because that will just kill the watcher thread,
   12.91 +            # which achieves nothing.
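
The xswatch.py rewrite moves the xenstore handle, lock and watcher thread from class attributes to module-level globals; a single daemon thread loops over xs.read_watch(), calls each watch's callback with its stored arguments, and now unregisters the watch when the callback returns a falsy value. A self-contained sketch of that single-daemon-thread dispatch loop, with a Queue standing in for the xenstore watch channel:

    import queue
    import threading
    import time

    events = queue.Queue()                      # stand-in for xs.read_watch()
    active = {}                                 # registered watches, keyed by path

    class Watch:
        def __init__(self, path, fn):
            self.path, self.fn = path, fn

    def watch_main():
        while True:
            we = events.get()                   # blocks like xs.read_watch()
            watch = we[1]
            try:
                if not watch.fn():              # falsy return: unwatch, as in watchMain
                    active.pop(watch.path, None)
            except Exception:
                continue                        # swallow errors so the thread survives

    # One daemon thread, matching watchStart()'s setDaemon(True).
    threading.Thread(name="Watcher", target=watch_main, daemon=True).start()

    hits = []
    w = Watch("@releaseDomain", lambda: hits.append(1))   # returns None: one-shot
    active[w.path] = w
    events.put((w.path, w))

    time.sleep(0.2)                             # let the daemon thread dispatch
    assert hits == [1] and w.path not in active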