ia64/linux-2.6.18-xen.hg

changeset 672:7d032c5bb346

merge with linux-2.6.18-xen.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Sep 16 21:25:54 2008 +0900 (2008-09-16)
parents 6fcc6c1e87f6 8ca4d2b16eb3
children 3161879fdf22
     1.1 --- a/arch/i386/kernel/acpi/processor_extcntl_xen.c	Fri Sep 12 11:28:00 2008 +0900
     1.2 +++ b/arch/i386/kernel/acpi/processor_extcntl_xen.c	Tue Sep 16 21:25:54 2008 +0900
     1.3 @@ -32,8 +32,6 @@
     1.4  #include <acpi/processor.h>
     1.5  #include <asm/hypercall.h>
     1.6  
     1.7 -static int xen_processor_pmbits;
     1.8 -
     1.9  static int xen_cx_notifier(struct acpi_processor *pr, int action)
    1.10  {
    1.11  	int ret, count = 0, i;
    1.12 @@ -143,7 +141,7 @@ static void convert_psd_pack(struct xen_
    1.13  
    1.14  static int xen_px_notifier(struct acpi_processor *pr, int action)
    1.15  {
    1.16 -	int ret;
    1.17 +	int ret = -EINVAL;
    1.18  	xen_platform_op_t op = {
    1.19  		.cmd			= XENPF_set_processor_pminfo,
    1.20  		.interface_version	= XENPF_INTERFACE_VERSION,
    1.21 @@ -155,48 +153,66 @@ static int xen_px_notifier(struct acpi_p
    1.22  	struct acpi_processor_performance *px;
    1.23  	struct acpi_psd_package *pdomain;
    1.24  
    1.25 -	/* leave dynamic ppc handle in the future */
    1.26 -	if (action == PROCESSOR_PM_CHANGE)
    1.27 -		return 0;
    1.28 +	if (!pr)
    1.29 +		return -EINVAL;
    1.30  
    1.31  	perf = &op.u.set_pminfo.perf;
    1.32  	px = pr->performance;
    1.33  
    1.34 -	perf->flags = XEN_PX_PPC | 
    1.35 -		      XEN_PX_PCT | 
    1.36 -		      XEN_PX_PSS | 
    1.37 -		      XEN_PX_PSD;
    1.38 -
    1.39 -	/* ppc */
    1.40 -	perf->ppc = pr->performance_platform_limit;
    1.41 -
    1.42 -	/* pct */
    1.43 -	convert_pct_reg(&perf->control_register, &px->control_register);
    1.44 -	convert_pct_reg(&perf->status_register, &px->status_register);
     1.45 +	switch (action) {
    1.46 +	case PROCESSOR_PM_CHANGE:
    1.47 +		/* ppc dynamic handle */
    1.48 +		perf->flags = XEN_PX_PPC;
    1.49 +		perf->platform_limit = pr->performance_platform_limit;
    1.50  
    1.51 -	/* pss */
    1.52 -	perf->state_count = px->state_count;
    1.53 -	states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
    1.54 -	if (!states)
    1.55 -		return -ENOMEM;
    1.56 -	convert_pss_states(states, px->states, px->state_count);
    1.57 -	set_xen_guest_handle(perf->states, states);
    1.58 +		ret = HYPERVISOR_platform_op(&op);
    1.59 +		break;
    1.60  
    1.61 -	/* psd */
    1.62 -	pdomain = &px->domain_info;
    1.63 -	convert_psd_pack(&perf->domain_info, pdomain);
    1.64 -	if (perf->domain_info.num_processors) {
    1.65 +	case PROCESSOR_PM_INIT:
    1.66 +		/* px normal init */
    1.67 +		perf->flags = XEN_PX_PPC | 
    1.68 +			      XEN_PX_PCT | 
    1.69 +			      XEN_PX_PSS | 
    1.70 +			      XEN_PX_PSD;
    1.71 +
    1.72 +		/* ppc */
    1.73 +		perf->platform_limit = pr->performance_platform_limit;
    1.74 +
    1.75 +		/* pct */
    1.76 +		convert_pct_reg(&perf->control_register, &px->control_register);
    1.77 +		convert_pct_reg(&perf->status_register, &px->status_register);
    1.78 +
    1.79 +		/* pss */
    1.80 +		perf->state_count = px->state_count;
    1.81 +		states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
    1.82 +		if (!states)
    1.83 +			return -ENOMEM;
    1.84 +		convert_pss_states(states, px->states, px->state_count);
    1.85 +		set_xen_guest_handle(perf->states, states);
    1.86 +
    1.87 +		/* psd */
    1.88 +		pdomain = &px->domain_info;
    1.89 +		convert_psd_pack(&perf->domain_info, pdomain);
    1.90  		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
    1.91  			perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
    1.92  		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
    1.93  			perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
    1.94  		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
    1.95  			perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
    1.96 -	} else
    1.97 -		perf->shared_type = CPUFREQ_SHARED_TYPE_NONE;
    1.98 +		else {
    1.99 +			ret = -ENODEV;
   1.100 +			kfree(states);
   1.101 +			break;
   1.102 +		}
   1.103  
   1.104 -	ret = HYPERVISOR_platform_op(&op);
   1.105 -	kfree(states);
   1.106 +		ret = HYPERVISOR_platform_op(&op);
   1.107 +		kfree(states);
   1.108 +		break;
   1.109 +
   1.110 +	default:
   1.111 +		break;
   1.112 +	}
   1.113 +
   1.114  	return ret;
   1.115  }
   1.116  
   1.117 @@ -215,13 +231,13 @@ static struct processor_extcntl_ops xen_
   1.118  
   1.119  void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
   1.120  {
   1.121 -	xen_processor_pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
   1.122 +	unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
   1.123  
   1.124 -	if (xen_processor_pmbits & XEN_PROCESSOR_PM_CX)
   1.125 +	if (pmbits & XEN_PROCESSOR_PM_CX)
   1.126  		xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
   1.127 -	if (xen_processor_pmbits & XEN_PROCESSOR_PM_PX)
   1.128 +	if (pmbits & XEN_PROCESSOR_PM_PX)
   1.129  		xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
   1.130 -	if (xen_processor_pmbits & XEN_PROCESSOR_PM_TX)
   1.131 +	if (pmbits & XEN_PROCESSOR_PM_TX)
   1.132  		xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
   1.133  
   1.134  	*ops = &xen_extcntl_ops;
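
The rework above makes xen_px_notifier() act on the notification type: PROCESSOR_PM_CHANGE now forwards only the updated _PPC platform limit to Xen (previously it was ignored), while PROCESSOR_PM_INIT marshals the full _PCT/_PSS/_PSD data. The _PSD-to-cpufreq mapping in the PM_INIT case can be read as the following standalone helper; a sketch for illustration only (psd_to_shared_type() is not a function in this tree):

    static int psd_to_shared_type(int coord_type)
    {
    	switch (coord_type) {
    	case DOMAIN_COORD_TYPE_SW_ALL:	/* all CPUs set the state */
    		return CPUFREQ_SHARED_TYPE_ALL;
    	case DOMAIN_COORD_TYPE_SW_ANY:	/* any one CPU may set it */
    		return CPUFREQ_SHARED_TYPE_ANY;
    	case DOMAIN_COORD_TYPE_HW_ALL:	/* hardware coordinates   */
    		return CPUFREQ_SHARED_TYPE_HW;
    	default:	/* unknown type: the new -ENODEV path above */
    		return -ENODEV;
    	}
    }
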
     2.1 --- a/arch/i386/kernel/io_apic-xen.c	Fri Sep 12 11:28:00 2008 +0900
     2.2 +++ b/arch/i386/kernel/io_apic-xen.c	Tue Sep 16 21:25:54 2008 +0900
     2.3 @@ -47,6 +47,7 @@
     2.4  
     2.5  #include <xen/interface/xen.h>
     2.6  #include <xen/interface/physdev.h>
     2.7 +#include <xen/evtchn.h>
     2.8  
     2.9  /* Fake i8259 */
    2.10  #define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
    2.11 @@ -1260,7 +1261,7 @@ static void ioapic_register_intr(int irq
    2.12  	set_intr_gate(vector, interrupt[idx]);
    2.13  }
    2.14  #else
    2.15 -#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
    2.16 +#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
    2.17  #endif
    2.18  
    2.19  static void __init setup_IO_APIC_irqs(void)
     3.1 --- a/arch/x86_64/kernel/io_apic-xen.c	Fri Sep 12 11:28:00 2008 +0900
     3.2 +++ b/arch/x86_64/kernel/io_apic-xen.c	Tue Sep 16 21:25:54 2008 +0900
     3.3 @@ -95,6 +95,7 @@ int vector_irq[NR_VECTORS] __read_mostly
     3.4  
     3.5  #include <xen/interface/xen.h>
     3.6  #include <xen/interface/physdev.h>
     3.7 +#include <xen/evtchn.h>
     3.8  
     3.9  /* Fake i8259 */
    3.10  #define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
    3.11 @@ -940,7 +941,7 @@ static void ioapic_register_intr(int irq
    3.12  	set_intr_gate(vector, interrupt[idx]);
    3.13  }
    3.14  #else
    3.15 -#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
    3.16 +#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
    3.17  #endif /* !CONFIG_XEN */
    3.18  
    3.19  static void __init setup_IO_APIC_irqs(void)
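
Both the i386 and x86_64 copies of io_apic-xen.c get the same change: the Xen build used to discard IO-APIC interrupt registration with a no-op macro, and now announces each IRQ to the event-channel layer so it can later be bound to a Xen PIRQ (needed for the MSI work below). An equivalent inline form of the new macro, as a sketch (evtchn_register_pirq() comes from the newly included <xen/evtchn.h>):

    static inline void ioapic_register_intr(int irq, int vector,
    					unsigned long trigger)
    {
    	/* vector and trigger are unused under Xen; the hypervisor
    	 * owns the IO-APIC, the kernel only records the pirq. */
    	evtchn_register_pirq(irq);
    }
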
     4.1 --- a/buildconfigs/linux-defconfig_xen0_x86_32	Fri Sep 12 11:28:00 2008 +0900
     4.2 +++ b/buildconfigs/linux-defconfig_xen0_x86_32	Tue Sep 16 21:25:54 2008 +0900
     4.3 @@ -248,7 +248,7 @@ CONFIG_PCI_MMCONFIG=y
     4.4  CONFIG_XEN_PCIDEV_FRONTEND=y
     4.5  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
     4.6  # CONFIG_PCIEPORTBUS is not set
     4.7 -# CONFIG_PCI_MSI is not set
     4.8 +CONFIG_PCI_MSI=y
     4.9  # CONFIG_PCI_DEBUG is not set
    4.10  CONFIG_ISA_DMA_API=y
    4.11  # CONFIG_SCx200 is not set
     5.1 --- a/buildconfigs/linux-defconfig_xen0_x86_64	Fri Sep 12 11:28:00 2008 +0900
     5.2 +++ b/buildconfigs/linux-defconfig_xen0_x86_64	Tue Sep 16 21:25:54 2008 +0900
     5.3 @@ -204,7 +204,7 @@ CONFIG_PCI_MMCONFIG=y
     5.4  CONFIG_XEN_PCIDEV_FRONTEND=y
     5.5  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
     5.6  # CONFIG_PCIEPORTBUS is not set
     5.7 -# CONFIG_PCI_MSI is not set
     5.8 +CONFIG_PCI_MSI=y
     5.9  # CONFIG_PCI_DEBUG is not set
    5.10  
    5.11  #
     6.1 --- a/buildconfigs/linux-defconfig_xen_x86_32	Fri Sep 12 11:28:00 2008 +0900
     6.2 +++ b/buildconfigs/linux-defconfig_xen_x86_32	Tue Sep 16 21:25:54 2008 +0900
     6.3 @@ -254,7 +254,7 @@ CONFIG_PCI_MMCONFIG=y
     6.4  CONFIG_XEN_PCIDEV_FRONTEND=y
     6.5  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
     6.6  # CONFIG_PCIEPORTBUS is not set
     6.7 -# CONFIG_PCI_MSI is not set
     6.8 +CONFIG_PCI_MSI=y
     6.9  # CONFIG_PCI_DEBUG is not set
    6.10  CONFIG_ISA_DMA_API=y
    6.11  CONFIG_SCx200=m
     7.1 --- a/buildconfigs/linux-defconfig_xen_x86_64	Fri Sep 12 11:28:00 2008 +0900
     7.2 +++ b/buildconfigs/linux-defconfig_xen_x86_64	Tue Sep 16 21:25:54 2008 +0900
     7.3 @@ -209,7 +209,7 @@ CONFIG_PCI_DIRECT=y
     7.4  CONFIG_XEN_PCIDEV_FRONTEND=y
     7.5  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
     7.6  # CONFIG_PCIEPORTBUS is not set
     7.7 -# CONFIG_PCI_MSI is not set
     7.8 +CONFIG_PCI_MSI=y
     7.9  # CONFIG_PCI_DEBUG is not set
    7.10  
    7.11  #
     8.1 --- a/drivers/acpi/processor_core.c	Fri Sep 12 11:28:00 2008 +0900
     8.2 +++ b/drivers/acpi/processor_core.c	Tue Sep 16 21:25:54 2008 +0900
     8.3 @@ -67,6 +67,7 @@
     8.4  #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
     8.5  #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
     8.6  #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
     8.7 +#define ACPI_PROCESSOR_NOTIFY_THROTTLING     0x82
     8.8  
     8.9  #define ACPI_PROCESSOR_LIMIT_USER	0
    8.10  #define ACPI_PROCESSOR_LIMIT_THERMAL	1
    8.11 @@ -618,6 +619,10 @@ static void acpi_processor_notify(acpi_h
    8.12  		acpi_processor_cst_has_changed(pr);
    8.13  		acpi_bus_generate_event(device, event, 0);
    8.14  		break;
    8.15 +	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
    8.16 +		acpi_processor_tstate_has_changed(pr);
    8.17 +		acpi_bus_generate_event(device, event, 0);
    8.18 +		break;
    8.19  	default:
    8.20  		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
    8.21  				  "Unsupported event [0x%x]\n", event));
    8.22 @@ -965,6 +970,8 @@ static int __init acpi_processor_init(vo
    8.23  
    8.24  	acpi_processor_ppc_init();
    8.25  
    8.26 +	acpi_processor_throttling_init();
    8.27 +
    8.28  	return 0;
    8.29  }
    8.30  
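
Notify value 0x82 is the ACPI processor notification for throttling capability changes, so the new case re-evaluates the T-state limit (via acpi_processor_tstate_has_changed(), added in processor_throttling.c below) and republishes the event. For context, a sketch of how a per-processor notify handler of this shape is installed with the 2.6.18-era ACPI core API (the helper name is illustrative):

    static acpi_status processor_install_notify(struct acpi_device *device)
    {
    	/* route firmware Notify(0x80/0x81/0x82) events on this
    	 * processor object to acpi_processor_notify() above */
    	return acpi_install_notify_handler(device->handle,
    					   ACPI_DEVICE_NOTIFY,
    					   acpi_processor_notify,
    					   acpi_driver_data(device));
    }
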
     9.1 --- a/drivers/acpi/processor_extcntl.c	Fri Sep 12 11:28:00 2008 +0900
     9.2 +++ b/drivers/acpi/processor_extcntl.c	Tue Sep 16 21:25:54 2008 +0900
     9.3 @@ -203,13 +203,22 @@ static int processor_extcntl_get_perform
     9.4  	 * processor objects to external logic. In this case, it's preferred
     9.5  	 * to use ACPI ID instead.
     9.6  	 */
     9.7 -	pr->performance->domain_info.num_processors = 0;
     9.8 +	pdomain = &pr->performance->domain_info;
     9.9 +	pdomain->num_processors = 0;
    9.10  	ret = acpi_processor_get_psd(pr);
    9.11 -	if (ret < 0)
    9.12 -		goto err_out;
    9.13 +	if (ret < 0) {
    9.14 +		/*
    9.15 +		 * _PSD is optional - assume no coordination if absent (or
    9.16 +		 * broken), matching native kernels' behavior.
    9.17 +		 */
    9.18 +		pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
    9.19 +		pdomain->revision = ACPI_PSD_REV0_REVISION;
    9.20 +		pdomain->domain = pr->acpi_id;
    9.21 +		pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
    9.22 +		pdomain->num_processors = 1;
    9.23 +	}
    9.24  
    9.25  	/* Some sanity check */
    9.26 -	pdomain = &pr->performance->domain_info;
    9.27  	if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
    9.28  	    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
    9.29  	    ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
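
Rather than failing when _PSD is absent or malformed, the code now fabricates a single-CPU SW_ALL domain keyed by the ACPI ID, which also satisfies the sanity check that follows. The fallback as a standalone helper (a sketch; psd_set_defaults() does not exist in this tree):

    static void psd_set_defaults(struct acpi_psd_package *pdomain,
    			     u32 acpi_id)
    {
    	pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
    	pdomain->revision = ACPI_PSD_REV0_REVISION;
    	pdomain->domain = acpi_id;	/* unique per processor */
    	pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
    	pdomain->num_processors = 1;	/* no cross-CPU coordination */
    }
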
    10.1 --- a/drivers/acpi/processor_throttling.c	Fri Sep 12 11:28:00 2008 +0900
    10.2 +++ b/drivers/acpi/processor_throttling.c	Tue Sep 16 21:25:54 2008 +0900
    10.3 @@ -29,6 +29,7 @@
    10.4  #include <linux/kernel.h>
    10.5  #include <linux/module.h>
    10.6  #include <linux/init.h>
    10.7 +#include <linux/sched.h>
    10.8  #include <linux/cpufreq.h>
    10.9  #include <linux/proc_fs.h>
   10.10  #include <linux/seq_file.h>
   10.11 @@ -41,21 +42,559 @@
   10.12  
   10.13  #define ACPI_PROCESSOR_COMPONENT        0x01000000
   10.14  #define ACPI_PROCESSOR_CLASS            "processor"
   10.15 -#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
   10.16  #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
   10.17 -ACPI_MODULE_NAME("acpi_processor")
   10.18 +ACPI_MODULE_NAME("processor_throttling");
   10.19 +
   10.20 +struct throttling_tstate {
   10.21 +	unsigned int cpu;		/* cpu nr */
   10.22 +	int target_state;		/* target T-state */
   10.23 +};
   10.24 +
   10.25 +#define THROTTLING_PRECHANGE       (1)
   10.26 +#define THROTTLING_POSTCHANGE      (2)
   10.27 +
   10.28 +static int acpi_processor_get_throttling(struct acpi_processor *pr);
   10.29 +int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
   10.30 +
   10.31 +static int acpi_processor_update_tsd_coord(void)
   10.32 +{
   10.33 +	int count, count_target;
   10.34 +	int retval = 0;
   10.35 +	unsigned int i, j;
   10.36 +	cpumask_t covered_cpus;
   10.37 +	struct acpi_processor *pr, *match_pr;
   10.38 +	struct acpi_tsd_package *pdomain, *match_pdomain;
   10.39 +	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
   10.40 +
   10.41 +	/*
    10.42 +	 * Now that we have _TSD data from all CPUs, let's set up T-state
   10.43 +	 * coordination between all CPUs.
   10.44 +	 */
   10.45 +	for_each_possible_cpu(i) {
   10.46 +		pr = processors[i];
   10.47 +		if (!pr)
   10.48 +			continue;
   10.49 +
   10.50 +		/* Basic validity check for domain info */
   10.51 +		pthrottling = &(pr->throttling);
   10.52 +
   10.53 +		/*
    10.54 +		 * If the _TSD package for one CPU is invalid, the
    10.55 +		 * coordination among all CPUs is treated as invalid.
    10.56 +		 * This may be overly strict.
   10.57 +		 */
   10.58 +		if (!pthrottling->tsd_valid_flag) {
   10.59 +			retval = -EINVAL;
   10.60 +			break;
   10.61 +		}
   10.62 +	}
   10.63 +	if (retval)
   10.64 +		goto err_ret;
   10.65 +
   10.66 +	cpus_clear(covered_cpus);
   10.67 +	for_each_possible_cpu(i) {
   10.68 +		pr = processors[i];
   10.69 +		if (!pr)
   10.70 +			continue;
   10.71 +
   10.72 +		if (cpu_isset(i, covered_cpus))
   10.73 +			continue;
   10.74 +		pthrottling = &pr->throttling;
   10.75 +
   10.76 +		pdomain = &(pthrottling->domain_info);
   10.77 +		cpu_set(i, pthrottling->shared_cpu_map);
   10.78 +		cpu_set(i, covered_cpus);
   10.79 +		/*
    10.80 +		 * If the number of processors in the _TSD domain is 1, it is
   10.81 +		 * unnecessary to parse the coordination for this CPU.
   10.82 +		 */
   10.83 +		if (pdomain->num_processors <= 1)
   10.84 +			continue;
   10.85 +
   10.86 +		/* Validate the Domain info */
   10.87 +		count_target = pdomain->num_processors;
   10.88 +		count = 1;
   10.89 +
   10.90 +		for_each_possible_cpu(j) {
   10.91 +			if (i == j)
   10.92 +				continue;
   10.93 +
   10.94 +			match_pr = processors[j];
   10.95 +			if (!match_pr)
   10.96 +				continue;
   10.97 +
   10.98 +			match_pthrottling = &(match_pr->throttling);
   10.99 +			match_pdomain = &(match_pthrottling->domain_info);
  10.100 +			if (match_pdomain->domain != pdomain->domain)
  10.101 +				continue;
  10.102 +
  10.103 +			/* Here i and j are in the same domain.
  10.104 +			 * If two TSD packages have the same domain, they
   10.105 +			 * should have the same num_processors and
   10.106 +			 * coordination type. Otherwise they will be
   10.107 +			 * regarded as illegal.
  10.108 +			 */
  10.109 +			if (match_pdomain->num_processors != count_target) {
  10.110 +				retval = -EINVAL;
  10.111 +				goto err_ret;
  10.112 +			}
  10.113 +
  10.114 +			if (pdomain->coord_type != match_pdomain->coord_type) {
  10.115 +				retval = -EINVAL;
  10.116 +				goto err_ret;
  10.117 +			}
  10.118 +
  10.119 +			cpu_set(j, covered_cpus);
  10.120 +			cpu_set(j, pthrottling->shared_cpu_map);
  10.121 +			count++;
  10.122 +		}
  10.123 +		for_each_possible_cpu(j) {
  10.124 +			if (i == j)
  10.125 +				continue;
  10.126 +
  10.127 +			match_pr = processors[j];
  10.128 +			if (!match_pr)
  10.129 +				continue;
  10.130 +
  10.131 +			match_pthrottling = &(match_pr->throttling);
  10.132 +			match_pdomain = &(match_pthrottling->domain_info);
  10.133 +			if (match_pdomain->domain != pdomain->domain)
  10.134 +				continue;
  10.135 +
  10.136 +			/*
   10.137 +			 * If some CPUs share the same domain, they
  10.138 +			 * will have the same shared_cpu_map.
  10.139 +			 */
  10.140 +			match_pthrottling->shared_cpu_map =
  10.141 +				pthrottling->shared_cpu_map;
  10.142 +		}
  10.143 +	}
  10.144 +
  10.145 +err_ret:
  10.146 +	for_each_possible_cpu(i) {
  10.147 +		pr = processors[i];
  10.148 +		if (!pr)
  10.149 +			continue;
  10.150 +
  10.151 +		/*
  10.152 +		 * Assume no coordination on any error parsing domain info.
   10.153 +		 * The coordination type will be forced to SW_ALL.
  10.154 +		 */
  10.155 +		if (retval) {
  10.156 +			pthrottling = &(pr->throttling);
  10.157 +			cpus_clear(pthrottling->shared_cpu_map);
  10.158 +			cpu_set(i, pthrottling->shared_cpu_map);
  10.159 +			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
  10.160 +		}
  10.161 +	}
  10.162 +
  10.163 +	return retval;
  10.164 +}
  10.165 +
  10.166 +/*
  10.167 + * Update the T-state coordination after the _TSD
  10.168 + * data for all cpus is obtained.
  10.169 + */
  10.170 +void acpi_processor_throttling_init(void)
  10.171 +{
  10.172 +	if (acpi_processor_update_tsd_coord())
  10.173 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  10.174 +			"Assume no T-state coordination\n"));
  10.175 +
  10.176 +	return;
  10.177 +}
  10.178 +
  10.179 +static int acpi_processor_throttling_notifier(unsigned long event, void *data)
  10.180 +{
  10.181 +	struct throttling_tstate *p_tstate = data;
  10.182 +	struct acpi_processor *pr;
   10.183 +	unsigned int cpu;
  10.184 +	int target_state;
  10.185 +	struct acpi_processor_limit *p_limit;
  10.186 +	struct acpi_processor_throttling *p_throttling;
  10.187 +
  10.188 +	cpu = p_tstate->cpu;
  10.189 +	pr = processors[cpu];
  10.190 +	if (!pr) {
  10.191 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
  10.192 +		return 0;
  10.193 +	}
  10.194 +	if (!pr->flags.throttling) {
  10.195 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
  10.196 +				"unsupported on CPU %d\n", cpu));
  10.197 +		return 0;
  10.198 +	}
  10.199 +	target_state = p_tstate->target_state;
  10.200 +	p_throttling = &(pr->throttling);
  10.201 +	switch (event) {
  10.202 +	case THROTTLING_PRECHANGE:
  10.203 +		/*
  10.204 +		 * Prechange event is used to choose one proper t-state,
  10.205 +		 * which meets the limits of thermal, user and _TPC.
  10.206 +		 */
  10.207 +		p_limit = &pr->limit;
  10.208 +		if (p_limit->thermal.tx > target_state)
  10.209 +			target_state = p_limit->thermal.tx;
  10.210 +		if (p_limit->user.tx > target_state)
  10.211 +			target_state = p_limit->user.tx;
  10.212 +		if (pr->throttling_platform_limit > target_state)
  10.213 +			target_state = pr->throttling_platform_limit;
  10.214 +		if (target_state >= p_throttling->state_count) {
  10.215 +			printk(KERN_WARNING
   10.216 +				"Exceeds the T-state limit\n");
  10.217 +			target_state = p_throttling->state_count - 1;
  10.218 +		}
  10.219 +		p_tstate->target_state = target_state;
  10.220 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
  10.221 +				"target T-state of CPU %d is T%d\n",
  10.222 +				cpu, target_state));
  10.223 +		break;
  10.224 +	case THROTTLING_POSTCHANGE:
  10.225 +		/*
  10.226 +		 * Postchange event is only used to update the
  10.227 +		 * T-state flag of acpi_processor_throttling.
  10.228 +		 */
  10.229 +		p_throttling->state = target_state;
  10.230 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
  10.231 +				"CPU %d is switched to T%d\n",
  10.232 +				cpu, target_state));
  10.233 +		break;
  10.234 +	default:
  10.235 +		printk(KERN_WARNING
  10.236 +			"Unsupported Throttling notifier event\n");
  10.237 +		break;
  10.238 +	}
  10.239 +
  10.240 +	return 0;
  10.241 +}
  10.242 +
  10.243 +/*
  10.244 + * _TPC - Throttling Present Capabilities
  10.245 + */
  10.246 +static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  10.247 +{
  10.248 +	acpi_status status = 0;
  10.249 +	unsigned long tpc = 0;
  10.250 +
  10.251 +	if (!pr)
  10.252 +		return -EINVAL;
  10.253 +	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
  10.254 +	if (ACPI_FAILURE(status)) {
  10.255 +		if (status != AE_NOT_FOUND) {
  10.256 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
  10.257 +		}
  10.258 +		return -ENODEV;
  10.259 +	}
  10.260 +	pr->throttling_platform_limit = (int)tpc;
  10.261 +	return 0;
  10.262 +}
  10.263 +
  10.264 +int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  10.265 +{
  10.266 +	int result = 0;
  10.267 +	int throttling_limit;
  10.268 +	int current_state;
  10.269 +	struct acpi_processor_limit *limit;
  10.270 +	int target_state;
  10.271 +
  10.272 +	result = acpi_processor_get_platform_limit(pr);
  10.273 +	if (result) {
  10.274 +		/* Throttling Limit is unsupported */
  10.275 +		return result;
  10.276 +	}
  10.277 +
  10.278 +	throttling_limit = pr->throttling_platform_limit;
  10.279 +	if (throttling_limit >= pr->throttling.state_count) {
   10.280 +		/* Incorrect throttling limit */
  10.281 +		return -EINVAL;
  10.282 +	}
  10.283 +
  10.284 +	current_state = pr->throttling.state;
  10.285 +	if (current_state > throttling_limit) {
  10.286 +		/*
   10.287 +		 * The current state already satisfies the _TPC
   10.288 +		 * limit, but it is reasonable for the OSPM to
   10.289 +		 * lower the T-state for better performance.
   10.290 +		 * The thermal and user limits must still be
   10.291 +		 * honored when doing so.
  10.292 +		 */
  10.293 +		limit = &pr->limit;
  10.294 +		target_state = throttling_limit;
  10.295 +		if (limit->thermal.tx > target_state)
  10.296 +			target_state = limit->thermal.tx;
  10.297 +		if (limit->user.tx > target_state)
  10.298 +			target_state = limit->user.tx;
  10.299 +	} else if (current_state == throttling_limit) {
  10.300 +		/*
  10.301 +		 * Unnecessary to change the throttling state
  10.302 +		 */
  10.303 +		return 0;
  10.304 +	} else {
  10.305 +		/*
   10.306 +		 * If the current state is lower than the _TPC limit,
   10.307 +		 * it is forced to switch to the throttling state
   10.308 +		 * defined by throttling_platform_limit.
   10.309 +		 * Because the previous state already met the thermal
   10.310 +		 * and user limits, it is unnecessary to check them again.
  10.311 +		 */
  10.312 +		target_state = throttling_limit;
  10.313 +	}
  10.314 +	return acpi_processor_set_throttling(pr, target_state);
  10.315 +}
  10.316 +
  10.317 +/*
  10.318 + * _PTC - Processor Throttling Control (and status) register location
  10.319 + */
  10.320 +static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
  10.321 +{
  10.322 +	int result = 0;
  10.323 +	acpi_status status = 0;
  10.324 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  10.325 +	union acpi_object *ptc = NULL;
  10.326 +	union acpi_object obj = { 0 };
  10.327 +	struct acpi_processor_throttling *throttling;
  10.328 +
  10.329 +	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
  10.330 +	if (ACPI_FAILURE(status)) {
  10.331 +		if (status != AE_NOT_FOUND) {
  10.332 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
  10.333 +		}
  10.334 +		return -ENODEV;
  10.335 +	}
  10.336 +
  10.337 +	ptc = (union acpi_object *)buffer.pointer;
  10.338 +	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
  10.339 +	    || (ptc->package.count != 2)) {
  10.340 +		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
  10.341 +		result = -EFAULT;
  10.342 +		goto end;
  10.343 +	}
  10.344 +
  10.345 +	/*
  10.346 +	 * control_register
  10.347 +	 */
  10.348 +
  10.349 +	obj = ptc->package.elements[0];
  10.350 +
  10.351 +	if ((obj.type != ACPI_TYPE_BUFFER)
  10.352 +	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
  10.353 +	    || (obj.buffer.pointer == NULL)) {
  10.354 +		printk(KERN_ERR PREFIX
  10.355 +		       "Invalid _PTC data (control_register)\n");
  10.356 +		result = -EFAULT;
  10.357 +		goto end;
  10.358 +	}
  10.359 +	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
  10.360 +	       sizeof(struct acpi_ptc_register));
  10.361 +
  10.362 +	/*
  10.363 +	 * status_register
  10.364 +	 */
  10.365 +
  10.366 +	obj = ptc->package.elements[1];
  10.367 +
  10.368 +	if ((obj.type != ACPI_TYPE_BUFFER)
  10.369 +	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
  10.370 +	    || (obj.buffer.pointer == NULL)) {
  10.371 +		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
  10.372 +		result = -EFAULT;
  10.373 +		goto end;
  10.374 +	}
  10.375 +
  10.376 +	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
  10.377 +	       sizeof(struct acpi_ptc_register));
  10.378 +
  10.379 +	throttling = &pr->throttling;
  10.380 +
  10.381 +	if ((throttling->control_register.bit_width +
  10.382 +		throttling->control_register.bit_offset) > 32) {
  10.383 +		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
  10.384 +		result = -EFAULT;
  10.385 +		goto end;
  10.386 +	}
  10.387 +
  10.388 +	if ((throttling->status_register.bit_width +
  10.389 +		throttling->status_register.bit_offset) > 32) {
  10.390 +		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
  10.391 +		result = -EFAULT;
  10.392 +		goto end;
  10.393 +	}
  10.394 +
  10.395 +      end:
  10.396 +	kfree(buffer.pointer);
  10.397 +
  10.398 +	return result;
  10.399 +}
  10.400 +
  10.401 +/*
  10.402 + * _TSS - Throttling Supported States
  10.403 + */
  10.404 +static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
  10.405 +{
  10.406 +	int result = 0;
  10.407 +	acpi_status status = AE_OK;
  10.408 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  10.409 +	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
  10.410 +	struct acpi_buffer state = { 0, NULL };
  10.411 +	union acpi_object *tss = NULL;
  10.412 +	int i;
  10.413 +
  10.414 +	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
  10.415 +	if (ACPI_FAILURE(status)) {
  10.416 +		if (status != AE_NOT_FOUND) {
  10.417 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
  10.418 +		}
  10.419 +		return -ENODEV;
  10.420 +	}
  10.421 +
  10.422 +	tss = buffer.pointer;
  10.423 +	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
  10.424 +		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
  10.425 +		result = -EFAULT;
  10.426 +		goto end;
  10.427 +	}
  10.428 +
  10.429 +	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  10.430 +			  tss->package.count));
  10.431 +
  10.432 +	pr->throttling.state_count = tss->package.count;
  10.433 +	pr->throttling.states_tss =
  10.434 +	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
  10.435 +		    GFP_KERNEL);
  10.436 +	if (!pr->throttling.states_tss) {
  10.437 +		result = -ENOMEM;
  10.438 +		goto end;
  10.439 +	}
  10.440 +
  10.441 +	for (i = 0; i < pr->throttling.state_count; i++) {
  10.442 +
  10.443 +		struct acpi_processor_tx_tss *tx =
  10.444 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
  10.445 +						      states_tss[i]);
  10.446 +
  10.447 +		state.length = sizeof(struct acpi_processor_tx_tss);
  10.448 +		state.pointer = tx;
  10.449 +
  10.450 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
  10.451 +
  10.452 +		status = acpi_extract_package(&(tss->package.elements[i]),
  10.453 +					      &format, &state);
  10.454 +		if (ACPI_FAILURE(status)) {
  10.455 +			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
  10.456 +			result = -EFAULT;
  10.457 +			kfree(pr->throttling.states_tss);
  10.458 +			goto end;
  10.459 +		}
  10.460 +
  10.461 +		if (!tx->freqpercentage) {
  10.462 +			printk(KERN_ERR PREFIX
  10.463 +			       "Invalid _TSS data: freq is zero\n");
  10.464 +			result = -EFAULT;
  10.465 +			kfree(pr->throttling.states_tss);
  10.466 +			goto end;
  10.467 +		}
  10.468 +	}
  10.469 +
  10.470 +      end:
  10.471 +	kfree(buffer.pointer);
  10.472 +
  10.473 +	return result;
  10.474 +}
  10.475 +
  10.476 +/*
  10.477 + * _TSD - T-State Dependencies
  10.478 + */
  10.479 +static int acpi_processor_get_tsd(struct acpi_processor *pr)
  10.480 +{
  10.481 +	int result = 0;
  10.482 +	acpi_status status = AE_OK;
  10.483 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  10.484 +	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
  10.485 +	struct acpi_buffer state = { 0, NULL };
  10.486 +	union acpi_object *tsd = NULL;
  10.487 +	struct acpi_tsd_package *pdomain;
  10.488 +	struct acpi_processor_throttling *pthrottling;
  10.489 +
  10.490 +	pthrottling = &pr->throttling;
  10.491 +	pthrottling->tsd_valid_flag = 0;
  10.492 +
  10.493 +	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
  10.494 +	if (ACPI_FAILURE(status)) {
  10.495 +		if (status != AE_NOT_FOUND) {
  10.496 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
  10.497 +		}
  10.498 +		return -ENODEV;
  10.499 +	}
  10.500 +
  10.501 +	tsd = buffer.pointer;
  10.502 +	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
  10.503 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
  10.504 +		result = -EFAULT;
  10.505 +		goto end;
  10.506 +	}
  10.507 +
  10.508 +	if (tsd->package.count != 1) {
  10.509 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
  10.510 +		result = -EFAULT;
  10.511 +		goto end;
  10.512 +	}
  10.513 +
  10.514 +	pdomain = &(pr->throttling.domain_info);
  10.515 +
  10.516 +	state.length = sizeof(struct acpi_tsd_package);
  10.517 +	state.pointer = pdomain;
  10.518 +
  10.519 +	status = acpi_extract_package(&(tsd->package.elements[0]),
  10.520 +				      &format, &state);
  10.521 +	if (ACPI_FAILURE(status)) {
  10.522 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
  10.523 +		result = -EFAULT;
  10.524 +		goto end;
  10.525 +	}
  10.526 +
  10.527 +	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
  10.528 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
  10.529 +		result = -EFAULT;
  10.530 +		goto end;
  10.531 +	}
  10.532 +
  10.533 +	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
  10.534 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
  10.535 +		result = -EFAULT;
  10.536 +		goto end;
  10.537 +	}
  10.538 +
  10.539 +	pthrottling = &pr->throttling;
  10.540 +	pthrottling->tsd_valid_flag = 1;
  10.541 +	pthrottling->shared_type = pdomain->coord_type;
  10.542 +	cpu_set(pr->id, pthrottling->shared_cpu_map);
  10.543 +	/*
   10.544 +	 * If the coordination type is not defined in the ACPI spec,
   10.545 +	 * tsd_valid_flag is cleared and the coordination type is
   10.546 +	 * forced to DOMAIN_COORD_TYPE_SW_ALL.
  10.547 +	 */
  10.548 +	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
  10.549 +		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
  10.550 +		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
  10.551 +		pthrottling->tsd_valid_flag = 0;
  10.552 +		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
  10.553 +	}
  10.554 +
  10.555 +      end:
  10.556 +	kfree(buffer.pointer);
  10.557 +	return result;
  10.558 +}
  10.559  
  10.560  /* --------------------------------------------------------------------------
  10.561                                Throttling Control
  10.562     -------------------------------------------------------------------------- */
  10.563 -static int acpi_processor_get_throttling(struct acpi_processor *pr)
  10.564 +static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
  10.565  {
  10.566  	int state = 0;
  10.567  	u32 value = 0;
  10.568  	u32 duty_mask = 0;
  10.569  	u32 duty_value = 0;
  10.570  
  10.571 -
  10.572  	if (!pr)
  10.573  		return -EINVAL;
  10.574  
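
The hunk above ports the _TSS/_TSD parsing and T-state coordination logic into this tree. The _TSD validation rule in acpi_processor_update_tsd_coord(): any two CPUs whose _TSD packages name the same domain must agree on num_processors and coord_type; any mismatch invalidates the coordination data for all CPUs, which then fall back to private SW_ALL domains. The per-pair rule, condensed into a sketch (tsd_packages_consistent() is illustrative only):

    static int tsd_packages_consistent(const struct acpi_tsd_package *a,
    				   const struct acpi_tsd_package *b)
    {
    	if (a->domain != b->domain)
    		return 1;	/* different domains: nothing to compare */
    	return a->num_processors == b->num_processors &&
    	       a->coord_type == b->coord_type;
    }
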
  10.575 @@ -95,13 +634,259 @@ static int acpi_processor_get_throttling
  10.576  	return 0;
  10.577  }
  10.578  
  10.579 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
  10.580 +#ifdef CONFIG_X86
  10.581 +static int acpi_throttling_rdmsr(struct acpi_processor *pr,
  10.582 +					acpi_integer * value)
  10.583 +{
  10.584 +	struct cpuinfo_x86 *c;
  10.585 +	u64 msr_high, msr_low;
  10.586 +	unsigned int cpu;
  10.587 +	u64 msr = 0;
  10.588 +	int ret = -1;
  10.589 +
  10.590 +	cpu = pr->id;
  10.591 +	c = &cpu_data[cpu];
  10.592 +
  10.593 +	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
  10.594 +		!cpu_has(c, X86_FEATURE_ACPI)) {
  10.595 +		printk(KERN_ERR PREFIX
   10.596 +			"HARDWARE addr space, NOT supported yet\n");
  10.597 +	} else {
  10.598 +		msr_low = 0;
  10.599 +		msr_high = 0;
  10.600 +		rdmsr_safe(MSR_IA32_THERM_CONTROL,
   10.601 +			(u32 *)&msr_low, (u32 *)&msr_high);
  10.602 +		msr = (msr_high << 32) | msr_low;
  10.603 +		*value = (acpi_integer) msr;
  10.604 +		ret = 0;
  10.605 +	}
  10.606 +	return ret;
  10.607 +}
  10.608 +
  10.609 +static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
  10.610 +{
  10.611 +	struct cpuinfo_x86 *c;
  10.612 +	unsigned int cpu;
  10.613 +	int ret = -1;
  10.614 +	u64 msr;
  10.615 +
  10.616 +	cpu = pr->id;
  10.617 +	c = &cpu_data[cpu];
  10.618 +
  10.619 +	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
  10.620 +		!cpu_has(c, X86_FEATURE_ACPI)) {
  10.621 +		printk(KERN_ERR PREFIX
   10.622 +			"HARDWARE addr space, NOT supported yet\n");
  10.623 +	} else {
  10.624 +		msr = value;
  10.625 +		wrmsr_safe(MSR_IA32_THERM_CONTROL,
  10.626 +			   (u32)msr, (u32)(msr >> 32));
  10.627 +		ret = 0;
  10.628 +	}
  10.629 +	return ret;
  10.630 +}
  10.631 +#else
  10.632 +static int acpi_throttling_rdmsr(struct acpi_processor *pr,
  10.633 +				acpi_integer * value)
  10.634 +{
  10.635 +	printk(KERN_ERR PREFIX
   10.636 +		"HARDWARE addr space, NOT supported yet\n");
  10.637 +	return -1;
  10.638 +}
  10.639 +
  10.640 +static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
  10.641 +{
  10.642 +	printk(KERN_ERR PREFIX
   10.643 +		"HARDWARE addr space, NOT supported yet\n");
  10.644 +	return -1;
  10.645 +}
  10.646 +#endif
  10.647 +
  10.648 +static int acpi_read_throttling_status(struct acpi_processor *pr,
  10.649 +					acpi_integer *value)
  10.650 +{
  10.651 +	u32 bit_width, bit_offset;
  10.652 +	u64 ptc_value;
  10.653 +	u64 ptc_mask;
  10.654 +	struct acpi_processor_throttling *throttling;
  10.655 +	int ret = -1;
  10.656 +
  10.657 +	throttling = &pr->throttling;
  10.658 +	switch (throttling->status_register.space_id) {
  10.659 +	case ACPI_ADR_SPACE_SYSTEM_IO:
  10.660 +		ptc_value = 0;
  10.661 +		bit_width = throttling->status_register.bit_width;
  10.662 +		bit_offset = throttling->status_register.bit_offset;
  10.663 +
  10.664 +		acpi_os_read_port((acpi_io_address) throttling->status_register.
  10.665 +				  address, (u32 *) &ptc_value,
  10.666 +				  (u32) (bit_width + bit_offset));
  10.667 +		ptc_mask = (1 << bit_width) - 1;
  10.668 +		*value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
  10.669 +		ret = 0;
  10.670 +		break;
  10.671 +	case ACPI_ADR_SPACE_FIXED_HARDWARE:
  10.672 +		ret = acpi_throttling_rdmsr(pr, value);
  10.673 +		break;
  10.674 +	default:
  10.675 +		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  10.676 +		       (u32) (throttling->status_register.space_id));
  10.677 +	}
  10.678 +	return ret;
  10.679 +}
  10.680 +
  10.681 +static int acpi_write_throttling_state(struct acpi_processor *pr,
  10.682 +				acpi_integer value)
  10.683 +{
  10.684 +	u32 bit_width, bit_offset;
  10.685 +	u64 ptc_value;
  10.686 +	u64 ptc_mask;
  10.687 +	struct acpi_processor_throttling *throttling;
  10.688 +	int ret = -1;
  10.689 +
  10.690 +	throttling = &pr->throttling;
  10.691 +	switch (throttling->control_register.space_id) {
  10.692 +	case ACPI_ADR_SPACE_SYSTEM_IO:
  10.693 +		bit_width = throttling->control_register.bit_width;
  10.694 +		bit_offset = throttling->control_register.bit_offset;
  10.695 +		ptc_mask = (1 << bit_width) - 1;
  10.696 +		ptc_value = value & ptc_mask;
  10.697 +
  10.698 +		acpi_os_write_port((acpi_io_address) throttling->
  10.699 +					control_register.address,
  10.700 +					(u32) (ptc_value << bit_offset),
  10.701 +					(u32) (bit_width + bit_offset));
  10.702 +		ret = 0;
  10.703 +		break;
  10.704 +	case ACPI_ADR_SPACE_FIXED_HARDWARE:
  10.705 +		ret = acpi_throttling_wrmsr(pr, value);
  10.706 +		break;
  10.707 +	default:
  10.708 +		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  10.709 +		       (u32) (throttling->control_register.space_id));
  10.710 +	}
  10.711 +	return ret;
  10.712 +}
  10.713 +
  10.714 +static int acpi_get_throttling_state(struct acpi_processor *pr,
  10.715 +				acpi_integer value)
  10.716 +{
  10.717 +	int i;
  10.718 +
  10.719 +	for (i = 0; i < pr->throttling.state_count; i++) {
  10.720 +		struct acpi_processor_tx_tss *tx =
  10.721 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
  10.722 +						      states_tss[i]);
  10.723 +		if (tx->control == value)
  10.724 +			break;
  10.725 +	}
   10.726 +	if (i == pr->throttling.state_count)
  10.727 +		i = -1;
  10.728 +	return i;
  10.729 +}
  10.730 +
  10.731 +static int acpi_get_throttling_value(struct acpi_processor *pr,
  10.732 +			int state, acpi_integer *value)
  10.733 +{
  10.734 +	int ret = -1;
  10.735 +
   10.736 +	if (state >= 0 && state < pr->throttling.state_count) {
  10.737 +		struct acpi_processor_tx_tss *tx =
  10.738 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
  10.739 +						      states_tss[state]);
  10.740 +		*value = tx->control;
  10.741 +		ret = 0;
  10.742 +	}
  10.743 +	return ret;
  10.744 +}
  10.745 +
  10.746 +static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
  10.747 +{
  10.748 +	int state = 0;
  10.749 +	int ret;
  10.750 +	acpi_integer value;
  10.751 +
  10.752 +	if (!pr)
  10.753 +		return -EINVAL;
  10.754 +
  10.755 +	if (!pr->flags.throttling)
  10.756 +		return -ENODEV;
  10.757 +
  10.758 +	pr->throttling.state = 0;
  10.759 +
  10.760 +	value = 0;
  10.761 +	ret = acpi_read_throttling_status(pr, &value);
  10.762 +	if (ret >= 0) {
  10.763 +		state = acpi_get_throttling_state(pr, value);
  10.764 +		pr->throttling.state = state;
  10.765 +	}
  10.766 +
  10.767 +	return 0;
  10.768 +}
  10.769 +
  10.770 +static int acpi_processor_get_throttling(struct acpi_processor *pr)
  10.771 +{
  10.772 +	cpumask_t saved_mask;
  10.773 +	int ret;
  10.774 +
  10.775 +	if (!pr)
  10.776 +		return -EINVAL;
  10.777 +
  10.778 +	if (!pr->flags.throttling)
  10.779 +		return -ENODEV;
  10.780 +	/*
   10.781 +	 * Migrate the current task to the CPU pointed to by pr.
  10.782 +	 */
  10.783 +	saved_mask = current->cpus_allowed;
  10.784 +	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  10.785 +	ret = pr->throttling.acpi_processor_get_throttling(pr);
  10.786 +	/* restore the previous state */
  10.787 +	set_cpus_allowed(current, saved_mask);
  10.788 +
  10.789 +	return ret;
  10.790 +}
  10.791 +
  10.792 +static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
  10.793 +{
  10.794 +	int i, step;
  10.795 +
  10.796 +	if (!pr->throttling.address) {
  10.797 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
  10.798 +		return -EINVAL;
  10.799 +	} else if (!pr->throttling.duty_width) {
  10.800 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
  10.801 +		return -EINVAL;
  10.802 +	}
  10.803 +	/* TBD: Support duty_cycle values that span bit 4. */
  10.804 +	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
  10.805 +		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
  10.806 +		return -EINVAL;
  10.807 +	}
  10.808 +
  10.809 +	pr->throttling.state_count = 1 << acpi_fadt.duty_width;
  10.810 +
  10.811 +	/*
   10.812 +	 * Compute state values. Note that throttling displays a linear power/
  10.813 +	 * performance relationship (at 50% performance the CPU will consume
  10.814 +	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
  10.815 +	 */
  10.816 +
  10.817 +	step = (1000 / pr->throttling.state_count);
  10.818 +
  10.819 +	for (i = 0; i < pr->throttling.state_count; i++) {
  10.820 +		pr->throttling.states[i].performance = 1000 - step * i;
  10.821 +		pr->throttling.states[i].power = 1000 - step * i;
  10.822 +	}
  10.823 +	return 0;
  10.824 +}
  10.825 +
  10.826 +static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
  10.827 +					      int state)
  10.828  {
  10.829  	u32 value = 0;
  10.830  	u32 duty_mask = 0;
  10.831  	u32 duty_value = 0;
  10.832  
  10.833 -
  10.834  	if (!pr)
  10.835  		return -EINVAL;
  10.836  
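
acpi_read_throttling_status() and acpi_write_throttling_state() support the two _PTC address spaces: SYSTEM_IO goes through acpi_os_read_port()/acpi_os_write_port() with a mask-and-shift on the bit field, and FIXED_HARDWARE goes through the IA32_THERM_CONTROL MSR (Intel CPUs with the ACPI feature flag only). The SYSTEM_IO decode reduces to a pure function; a sketch (ptc_extract() is illustrative):

    static u64 ptc_extract(u64 raw, u32 bit_offset, u32 bit_width)
    {
    	/* _PTC registers were validated earlier to satisfy
    	 * bit_offset + bit_width <= 32, so the shift is safe */
    	u64 mask = (1ULL << bit_width) - 1;

    	return (raw >> bit_offset) & mask;
    }
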
  10.837 @@ -114,6 +899,8 @@ int acpi_processor_set_throttling(struct
  10.838  	if (state == pr->throttling.state)
  10.839  		return 0;
  10.840  
  10.841 +	if (state < pr->throttling_platform_limit)
  10.842 +		return -EPERM;
  10.843  	/*
  10.844  	 * Calculate the duty_value and duty_mask.
  10.845  	 */
  10.846 @@ -166,12 +953,135 @@ int acpi_processor_set_throttling(struct
  10.847  	return 0;
  10.848  }
  10.849  
  10.850 +static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
  10.851 +					     int state)
  10.852 +{
  10.853 +	int ret;
  10.854 +	acpi_integer value;
  10.855 +
  10.856 +	if (!pr)
  10.857 +		return -EINVAL;
  10.858 +
  10.859 +	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  10.860 +		return -EINVAL;
  10.861 +
  10.862 +	if (!pr->flags.throttling)
  10.863 +		return -ENODEV;
  10.864 +
  10.865 +	if (state == pr->throttling.state)
  10.866 +		return 0;
  10.867 +
  10.868 +	if (state < pr->throttling_platform_limit)
  10.869 +		return -EPERM;
  10.870 +
  10.871 +	value = 0;
  10.872 +	ret = acpi_get_throttling_value(pr, state, &value);
  10.873 +	if (ret >= 0) {
  10.874 +		acpi_write_throttling_state(pr, value);
  10.875 +		pr->throttling.state = state;
  10.876 +	}
  10.877 +
  10.878 +	return 0;
  10.879 +}
  10.880 +
  10.881 +int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
  10.882 +{
  10.883 +	cpumask_t saved_mask;
  10.884 +	int ret = 0;
  10.885 +	unsigned int i;
  10.886 +	struct acpi_processor *match_pr;
  10.887 +	struct acpi_processor_throttling *p_throttling;
  10.888 +	struct throttling_tstate t_state;
  10.889 +	cpumask_t online_throttling_cpus;
  10.890 +
  10.891 +	if (!pr)
  10.892 +		return -EINVAL;
  10.893 +
  10.894 +	if (!pr->flags.throttling)
  10.895 +		return -ENODEV;
  10.896 +
  10.897 +	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  10.898 +		return -EINVAL;
  10.899 +
  10.900 +	saved_mask = current->cpus_allowed;
  10.901 +	t_state.target_state = state;
  10.902 +	p_throttling = &(pr->throttling);
  10.903 +	cpus_and(online_throttling_cpus, cpu_online_map,
  10.904 +			p_throttling->shared_cpu_map);
  10.905 +	/*
  10.906 +	 * The throttling notifier will be called for every
  10.907 +	 * affected cpu in order to get one proper T-state.
  10.908 +	 * The notifier event is THROTTLING_PRECHANGE.
  10.909 +	 */
  10.910 +	for_each_cpu_mask(i, online_throttling_cpus) {
  10.911 +		t_state.cpu = i;
  10.912 +		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
  10.913 +							&t_state);
  10.914 +	}
  10.915 +	/*
   10.916 +	 * The per-processor set_throttling hook is called to switch
   10.917 +	 * the T-state. If the coordination type is SW_ALL or HW_ALL,
   10.918 +	 * it must be called for every affected CPU. Otherwise it is
   10.919 +	 * called only for the CPU pointed to by pr.
  10.920 +	 */
  10.921 +	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
  10.922 +		set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  10.923 +		ret = p_throttling->acpi_processor_set_throttling(pr,
  10.924 +						t_state.target_state);
  10.925 +	} else {
  10.926 +		/*
  10.927 +		 * When the T-state coordination is SW_ALL or HW_ALL,
   10.928 +		 * the T-state must be set for every affected
   10.929 +		 * CPU.
  10.930 +		 */
  10.931 +		for_each_cpu_mask(i, online_throttling_cpus) {
  10.932 +			match_pr = processors[i];
  10.933 +			/*
  10.934 +			 * If the pointer is invalid, we will report the
  10.935 +			 * error message and continue.
  10.936 +			 */
  10.937 +			if (!match_pr) {
  10.938 +				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  10.939 +					"Invalid Pointer for CPU %d\n", i));
  10.940 +				continue;
  10.941 +			}
  10.942 +			/*
  10.943 +			 * If the throttling control is unsupported on CPU i,
  10.944 +			 * we will report the error message and continue.
  10.945 +			 */
  10.946 +			if (!match_pr->flags.throttling) {
  10.947 +				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   10.948 +					"Throttling control is unsupported "
  10.949 +					"on CPU %d\n", i));
  10.950 +				continue;
  10.951 +			}
  10.952 +			t_state.cpu = i;
  10.953 +			set_cpus_allowed(current, cpumask_of_cpu(i));
  10.954 +			ret = match_pr->throttling.
  10.955 +				acpi_processor_set_throttling(
  10.956 +				match_pr, t_state.target_state);
  10.957 +		}
  10.958 +	}
  10.959 +	/*
  10.960 +	 * After the set_throttling is called, the
  10.961 +	 * throttling notifier is called for every
  10.962 +	 * affected cpu to update the T-states.
  10.963 +	 * The notifier event is THROTTLING_POSTCHANGE
  10.964 +	 */
  10.965 +	for_each_cpu_mask(i, online_throttling_cpus) {
  10.966 +		t_state.cpu = i;
  10.967 +		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
  10.968 +							&t_state);
  10.969 +	}
  10.970 +	/* restore the previous state */
  10.971 +	set_cpus_allowed(current, saved_mask);
  10.972 +	return ret;
  10.973 +}
  10.974 +
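
acpi_processor_set_throttling() above runs a three-phase switch: a THROTTLING_PRECHANGE pass clamps the requested state on every affected CPU, the per-processor set hook runs either on one CPU (SW_ANY) or on every CPU in the domain (SW_ALL/HW_ALL, by temporarily migrating the current task with set_cpus_allowed()), and a THROTTLING_POSTCHANGE pass updates the cached state. The clamp the PRECHANGE notifier applies, gathered into one sketch function (clamp_tstate() is illustrative):

    static int clamp_tstate(int requested, int thermal_tx, int user_tx,
    			int platform_limit, int state_count)
    {
    	int t = requested;

    	/* honor the strictest (highest-numbered) limit... */
    	if (thermal_tx > t)
    		t = thermal_tx;
    	if (user_tx > t)
    		t = user_tx;
    	if (platform_limit > t)
    		t = platform_limit;
    	/* ...but never exceed the states the CPU provides */
    	if (t >= state_count)
    		t = state_count - 1;
    	return t;
    }
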
  10.975  int acpi_processor_get_throttling_info(struct acpi_processor *pr)
  10.976  {
  10.977  	int result = 0;
  10.978 -	int step = 0;
  10.979 -	int i = 0;
  10.980 -
  10.981 +	struct acpi_processor_throttling *pthrottling;
  10.982  
  10.983  	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  10.984  			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
  10.985 @@ -182,19 +1092,36 @@ int acpi_processor_get_throttling_info(s
  10.986  	if (!pr)
  10.987  		return -EINVAL;
  10.988  
  10.989 -	/* TBD: Support ACPI 2.0 objects */
  10.990 +	/*
  10.991 +	 * Evaluate _PTC, _TSS and _TPC
  10.992 +	 * They must all be present or none of them can be used.
  10.993 +	 */
  10.994 +	if (acpi_processor_get_throttling_control(pr) ||
  10.995 +		acpi_processor_get_throttling_states(pr) ||
  10.996 +		acpi_processor_get_platform_limit(pr))
  10.997 +	{
  10.998 +		pr->throttling.acpi_processor_get_throttling =
  10.999 +		    &acpi_processor_get_throttling_fadt;
 10.1000 +		pr->throttling.acpi_processor_set_throttling =
 10.1001 +		    &acpi_processor_set_throttling_fadt;
 10.1002 +		if (acpi_processor_get_fadt_info(pr))
 10.1003 +			return 0;
 10.1004 +	} else {
 10.1005 +		pr->throttling.acpi_processor_get_throttling =
 10.1006 +		    &acpi_processor_get_throttling_ptc;
 10.1007 +		pr->throttling.acpi_processor_set_throttling =
 10.1008 +		    &acpi_processor_set_throttling_ptc;
 10.1009 +	}
 10.1010  
 10.1011 -	if (!pr->throttling.address) {
 10.1012 -		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
 10.1013 -		return 0;
 10.1014 -	} else if (!pr->throttling.duty_width) {
 10.1015 -		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
 10.1016 -		return 0;
 10.1017 -	}
 10.1018 -	/* TBD: Support duty_cycle values that span bit 4. */
 10.1019 -	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
 10.1020 -		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
 10.1021 -		return 0;
 10.1022 +	/*
  10.1023 +	 * If the _TSD package for one CPU can't be parsed, it means
  10.1024 +	 * that this CPU has no coordination with the other CPUs.
 10.1025 +	 */
 10.1026 +	if (acpi_processor_get_tsd(pr)) {
 10.1027 +		pthrottling = &pr->throttling;
 10.1028 +		pthrottling->tsd_valid_flag = 0;
 10.1029 +		cpu_set(pr->id, pthrottling->shared_cpu_map);
 10.1030 +		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 10.1031  	}
 10.1032  
 10.1033  	/*
 10.1034 @@ -208,21 +1135,6 @@ int acpi_processor_get_throttling_info(s
 10.1035  		return 0;
 10.1036  	}
 10.1037  
 10.1038 -	pr->throttling.state_count = 1 << acpi_fadt.duty_width;
 10.1039 -
 10.1040 -	/*
 10.1041 -	 * Compute state values. Note that throttling displays a linear power/
 10.1042 -	 * performance relationship (at 50% performance the CPU will consume
 10.1043 -	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
 10.1044 -	 */
 10.1045 -
 10.1046 -	step = (1000 / pr->throttling.state_count);
 10.1047 -
 10.1048 -	for (i = 0; i < pr->throttling.state_count; i++) {
 10.1049 -		pr->throttling.states[i].performance = step * i;
 10.1050 -		pr->throttling.states[i].power = step * i;
 10.1051 -	}
 10.1052 -
 10.1053  	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
 10.1054  			  pr->throttling.state_count));
 10.1055  
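
The removal above drops the old in-place FADT table computation (note it set performance = step * i, making T0 the slowest state); the relocated acpi_processor_get_fadt_info() earlier in this file computes 1000 - step * i, so T0 is full performance. A worked example as a small user-space demo, assuming duty_width = 3:

    #include <stdio.h>

    int main(void)
    {
    	int state_count = 1 << 3;	/* duty_width = 3 -> 8 T-states */
    	int step = 1000 / state_count;	/* 125, in tenths of a percent  */
    	int i;

    	/* prints T0: 1000 (100.0%) down to T7: 125 (12.5%) */
    	for (i = 0; i < state_count; i++)
    		printf("T%d: %d\n", i, 1000 - step * i);
    	return 0;
    }
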
 10.1056 @@ -259,11 +1171,10 @@ int acpi_processor_get_throttling_info(s
 10.1057  static int acpi_processor_throttling_seq_show(struct seq_file *seq,
 10.1058  					      void *offset)
 10.1059  {
 10.1060 -	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
 10.1061 +	struct acpi_processor *pr = seq->private;
 10.1062  	int i = 0;
 10.1063  	int result = 0;
 10.1064  
 10.1065 -
 10.1066  	if (!pr)
 10.1067  		goto end;
 10.1068  
 10.1069 @@ -281,15 +1192,27 @@ static int acpi_processor_throttling_seq
 10.1070  	}
 10.1071  
 10.1072  	seq_printf(seq, "state count:             %d\n"
 10.1073 -		   "active state:            T%d\n",
 10.1074 -		   pr->throttling.state_count, pr->throttling.state);
 10.1075 +		   "active state:            T%d\n"
  10.1076 +		   "states available: T%d to T%d\n",
 10.1077 +		   pr->throttling.state_count, pr->throttling.state,
 10.1078 +		   pr->throttling_platform_limit,
 10.1079 +		   pr->throttling.state_count - 1);
 10.1080  
 10.1081  	seq_puts(seq, "states:\n");
 10.1082 -	for (i = 0; i < pr->throttling.state_count; i++)
 10.1083 -		seq_printf(seq, "   %cT%d:                  %02d%%\n",
 10.1084 -			   (i == pr->throttling.state ? '*' : ' '), i,
 10.1085 -			   (pr->throttling.states[i].performance ? pr->
 10.1086 -			    throttling.states[i].performance / 10 : 0));
 10.1087 +	if (pr->throttling.acpi_processor_get_throttling ==
 10.1088 +			acpi_processor_get_throttling_fadt) {
 10.1089 +		for (i = 0; i < pr->throttling.state_count; i++)
 10.1090 +			seq_printf(seq, "   %cT%d:                  %02d%%\n",
 10.1091 +				   (i == pr->throttling.state ? '*' : ' '), i,
 10.1092 +				   (pr->throttling.states[i].performance ? pr->
 10.1093 +				    throttling.states[i].performance / 10 : 0));
 10.1094 +	} else {
 10.1095 +		for (i = 0; i < pr->throttling.state_count; i++)
 10.1096 +			seq_printf(seq, "   %cT%d:                  %02d%%\n",
 10.1097 +				   (i == pr->throttling.state ? '*' : ' '), i,
 10.1098 +				   (int)pr->throttling.states_tss[i].
 10.1099 +				   freqpercentage);
 10.1100 +	}
 10.1101  
 10.1102        end:
 10.1103  	return 0;
 10.1104 @@ -302,15 +1225,17 @@ static int acpi_processor_throttling_ope
 10.1105  			   PDE(inode)->data);
 10.1106  }
 10.1107  
 10.1108 -static ssize_t acpi_processor_write_throttling(struct file * file,
 10.1109 +static ssize_t acpi_processor_write_throttling(struct file *file,
 10.1110  					       const char __user * buffer,
 10.1111  					       size_t count, loff_t * data)
 10.1112  {
 10.1113  	int result = 0;
 10.1114 -	struct seq_file *m = (struct seq_file *)file->private_data;
 10.1115 -	struct acpi_processor *pr = (struct acpi_processor *)m->private;
 10.1116 -	char state_string[12] = { '\0' };
 10.1117 -
 10.1118 +	struct seq_file *m = file->private_data;
 10.1119 +	struct acpi_processor *pr = m->private;
 10.1120 +	char state_string[5] = "";
 10.1121 +	char *charp = NULL;
 10.1122 +	size_t state_val = 0;
 10.1123 +	char tmpbuf[5] = "";
 10.1124  
 10.1125  	if (!pr || (count > sizeof(state_string) - 1))
 10.1126  		return -EINVAL;
 10.1127 @@ -319,10 +1244,23 @@ static ssize_t acpi_processor_write_thro
 10.1128  		return -EFAULT;
 10.1129  
 10.1130  	state_string[count] = '\0';
 10.1131 +	if ((count > 0) && (state_string[count-1] == '\n'))
 10.1132 +		state_string[count-1] = '\0';
 10.1133  
 10.1134 -	result = acpi_processor_set_throttling(pr,
 10.1135 -					       simple_strtoul(state_string,
 10.1136 -							      NULL, 0));
 10.1137 +	charp = state_string;
 10.1138 +	if ((state_string[0] == 't') || (state_string[0] == 'T'))
 10.1139 +		charp++;
 10.1140 +
 10.1141 +	state_val = simple_strtoul(charp, NULL, 0);
 10.1142 +	if (state_val >= pr->throttling.state_count)
 10.1143 +		return -EINVAL;
 10.1144 +
 10.1145 +	snprintf(tmpbuf, 5, "%zu", state_val);
 10.1146 +
 10.1147 +	if (strcmp(tmpbuf, charp) != 0)
 10.1148 +		return -EINVAL;
 10.1149 +
 10.1150 +	result = acpi_processor_set_throttling(pr, state_val);
 10.1151  	if (result)
 10.1152  		return result;
 10.1153  
 10.1154 @@ -330,6 +1268,7 @@ static ssize_t acpi_processor_write_thro
 10.1155  }
 10.1156  
 10.1157  struct file_operations acpi_processor_throttling_fops = {
 10.1158 +	.owner = THIS_MODULE,
 10.1159  	.open = acpi_processor_throttling_open_fs,
 10.1160  	.read = seq_read,
 10.1161  	.write = acpi_processor_write_throttling,
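
The rewritten /proc write handler accepts "3", "T3" or "t3" with an optional trailing newline, bounds-checks the value against state_count, and rejects trailing garbage by printing the parsed number back with snprintf() and comparing it to the input. That round-trip trick in isolation, as a small user-space demo (parse_tstate() is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_tstate(const char *s, unsigned int state_count)
    {
    	char buf[12];
    	unsigned long val;

    	if (*s == 't' || *s == 'T')	/* optional T prefix */
    		s++;
    	val = strtoul(s, NULL, 0);
    	if (val >= state_count)
    		return -1;
    	snprintf(buf, sizeof(buf), "%lu", val);
    	return strcmp(buf, s) ? -1 : (int)val;	/* rejects "3abc" etc. */
    }

    int main(void)
    {
    	/* prints "3 3 -1" */
    	printf("%d %d %d\n", parse_tstate("T3", 8),
    	       parse_tstate("3", 8), parse_tstate("3abc", 8));
    	return 0;
    }
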
    11.1 --- a/drivers/pci/msi-xen.c	Fri Sep 12 11:28:00 2008 +0900
    11.2 +++ b/drivers/pci/msi-xen.c	Tue Sep 16 21:25:54 2008 +0900
    11.3 @@ -15,6 +15,8 @@
    11.4  #include <linux/pci.h>
    11.5  #include <linux/proc_fs.h>
    11.6  
    11.7 +#include <xen/evtchn.h>
    11.8 +
    11.9  #include <asm/errno.h>
   11.10  #include <asm/io.h>
   11.11  #include <asm/smp.h>
   11.12 @@ -156,13 +158,15 @@ static int msi_unmap_pirq(struct pci_dev
   11.13  	int rc;
   11.14  
   11.15  	unmap.domid = msi_get_dev_owner(dev);
   11.16 -	unmap.pirq = pirq;
   11.17 +	unmap.pirq = evtchn_get_xen_pirq(pirq);
   11.18  
   11.19  	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
   11.20  		printk(KERN_WARNING "unmap irq %x failed\n", pirq);
   11.21  
   11.22  	if (rc < 0)
   11.23  		return rc;
   11.24 +
   11.25 +	evtchn_map_pirq(pirq, 0);
   11.26  	return 0;
   11.27  }
   11.28  
   11.29 @@ -197,7 +201,7 @@ static int msi_map_pirq_to_vector(struct
   11.30  	map_irq.domid = domid;
   11.31  	map_irq.type = MAP_PIRQ_TYPE_MSI;
   11.32  	map_irq.index = -1;
   11.33 -	map_irq.pirq = pirq;
   11.34 +	map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
   11.35  	map_irq.bus = dev->bus->number;
   11.36  	map_irq.devfn = dev->devfn;
   11.37  	map_irq.entry_nr = entry_nr;
   11.38 @@ -208,8 +212,12 @@ static int msi_map_pirq_to_vector(struct
   11.39  
   11.40  	if (rc < 0)
   11.41  		return rc;
   11.42 +	/* This happens when MSI support is not enabled in Xen. */
   11.43 +	if (rc == 0 && map_irq.pirq < 0)
   11.44 +		return -ENOSYS;
   11.45  
   11.46 -	return map_irq.pirq;
   11.47 +	BUG_ON(map_irq.pirq <= 0);
   11.48 +	return evtchn_map_pirq(pirq, map_irq.pirq);
   11.49  }
   11.50  
   11.51  static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
   11.52 @@ -364,9 +372,15 @@ void pci_restore_msix_state(struct pci_d
   11.53  
   11.54  	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
   11.55  	list_for_each_entry_safe(pirq_entry, tmp,
   11.56 -				 &msi_dev_entry->pirq_list_head, list)
   11.57 -		msi_map_pirq_to_vector(dev, pirq_entry->pirq,
   11.58 -				       pirq_entry->entry_nr, table_base);
   11.59 +				 &msi_dev_entry->pirq_list_head, list) {
   11.60 +		int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
   11.61 +						pirq_entry->entry_nr, table_base);
   11.62 +		if (rc < 0)
   11.63 +			printk(KERN_WARNING
   11.64 +			       "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
   11.65 +			       pci_name(dev), pirq_entry->entry_nr,
   11.66 +			       pirq_entry->pirq, rc);
   11.67 +	}
   11.68  	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
   11.69  
   11.70  	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
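
The MSI path now routes every pirq through the event-channel layer's translation: map/unmap hypercalls use Xen's pirq number (evtchn_get_xen_pirq()), successful maps are recorded with evtchn_map_pirq(), unmaps tear the mapping down with evtchn_map_pirq(pirq, 0), and the MSI-X restore path logs per-entry failures instead of ignoring them. Conceptually there are now two IRQ namespaces, kernel pirq and Xen pirq; a purely hypothetical sketch of the mapping (not this tree's actual implementation, which lives in drivers/xen/core/evtchn.c):

    #define NR_IRQS 256			/* placeholder size */

    static int xen_pirq_of[NR_IRQS];	/* kernel pirq -> Xen pirq */

    static int get_xen_pirq(int pirq)	/* cf. evtchn_get_xen_pirq() */
    {
    	return xen_pirq_of[pirq];
    }

    static int map_pirq(int pirq, int xen_pirq)	/* cf. evtchn_map_pirq() */
    {
    	xen_pirq_of[pirq] = xen_pirq;	/* 0 tears the mapping down */
    	return pirq;
    }
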
    12.1 --- a/drivers/xen/blktap/common.h	Fri Sep 12 11:28:00 2008 +0900
    12.2 +++ b/drivers/xen/blktap/common.h	Tue Sep 16 21:25:54 2008 +0900
    12.3 @@ -89,6 +89,7 @@ typedef struct blkif_st {
    12.4  
    12.5  blkif_t *tap_alloc_blkif(domid_t domid);
    12.6  void tap_blkif_free(blkif_t *blkif);
    12.7 +void tap_blkif_kmem_cache_free(blkif_t *blkif);
    12.8  int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
    12.9  		  unsigned int evtchn);
   12.10  void tap_blkif_unmap(blkif_t *blkif);
    13.1 --- a/drivers/xen/blktap/interface.c	Fri Sep 12 11:28:00 2008 +0900
    13.2 +++ b/drivers/xen/blktap/interface.c	Tue Sep 16 21:25:54 2008 +0900
    13.3 @@ -162,8 +162,15 @@ void tap_blkif_free(blkif_t *blkif)
    13.4  {
    13.5  	atomic_dec(&blkif->refcnt);
    13.6  	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
    13.7 +	atomic_inc(&blkif->refcnt);
    13.8  
    13.9  	tap_blkif_unmap(blkif);
   13.10 +}
   13.11 +
   13.12 +void tap_blkif_kmem_cache_free(blkif_t *blkif)
   13.13 +{
   13.14 +	if (!atomic_dec_and_test(&blkif->refcnt))
   13.15 +		BUG();
   13.16  	kmem_cache_free(blkif_cachep, blkif);
   13.17  }
   13.18  
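
The interface.c split above matters for teardown ordering: tap_blkif_free() now drains outstanding references and unmaps the ring while deliberately keeping one reference, so the blkif_t stays valid until tap_blkif_kmem_cache_free() drops that final reference and returns the object to the slab cache. In sketch form (this is exactly how blktap_remove() in the next file uses the pair):

	/* Sketch of the teardown contract; nothing may touch blkif after
	 * the second call. */
	tap_blkif_free(blkif);			/* drain refs, unmap ring, keep 1 ref */
	tap_blkif_kmem_cache_free(blkif);	/* drop final ref, kmem_cache_free() */
	blkif = NULL;
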
    14.1 --- a/drivers/xen/blktap/xenbus.c	Fri Sep 12 11:28:00 2008 +0900
    14.2 +++ b/drivers/xen/blktap/xenbus.c	Tue Sep 16 21:25:54 2008 +0900
    14.3 @@ -182,6 +182,7 @@ static int blktap_remove(struct xenbus_d
    14.4  			kthread_stop(be->blkif->xenblkd);
    14.5  		signal_tapdisk(be->blkif->dev_num);
    14.6  		tap_blkif_free(be->blkif);
    14.7 +		tap_blkif_kmem_cache_free(be->blkif);
    14.8  		be->blkif = NULL;
    14.9  	}
   14.10  	kfree(be);
   14.11 @@ -364,6 +365,7 @@ static void tap_frontend_changed(struct 
   14.12  			kthread_stop(be->blkif->xenblkd);
   14.13  			be->blkif->xenblkd = NULL;
   14.14  		}
   14.15 +		tap_blkif_free(be->blkif);
   14.16  		xenbus_switch_state(dev, XenbusStateClosing);
   14.17  		break;
   14.18  
    15.1 --- a/drivers/xen/core/evtchn.c	Fri Sep 12 11:28:00 2008 +0900
    15.2 +++ b/drivers/xen/core/evtchn.c	Tue Sep 16 21:25:54 2008 +0900
    15.3 @@ -66,13 +66,27 @@ enum {
    15.4  	IRQT_VIRQ,
    15.5  	IRQT_IPI,
    15.6  	IRQT_LOCAL_PORT,
    15.7 -	IRQT_CALLER_PORT
    15.8 +	IRQT_CALLER_PORT,
    15.9 +	_IRQT_COUNT
   15.10  };
   15.11  
   15.12 +#define _IRQT_BITS 4
   15.13 +#define _EVTCHN_BITS 12
   15.14 +#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
   15.15 +
   15.16  /* Constructor for packed IRQ information. */
   15.17  static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
   15.18  {
   15.19 -	return ((type << 24) | (index << 16) | evtchn);
   15.20 +	BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));
   15.21 +
   15.22 +	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
   15.23 +	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
   15.24 +	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
   15.25 +	BUG_ON(index >> _INDEX_BITS);
   15.26 +
   15.27 +	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
   15.28 +
   15.29 +	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
   15.30  }
   15.31  
   15.32  /* Convenient shorthand for packed representation of an unbound IRQ. */
   15.33 @@ -84,17 +98,17 @@ static inline u32 mk_irq_info(u32 type, 
   15.34  
   15.35  static inline unsigned int evtchn_from_irq(int irq)
   15.36  {
   15.37 -	return (u16)(irq_info[irq]);
   15.38 +	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
   15.39  }
   15.40  
   15.41  static inline unsigned int index_from_irq(int irq)
   15.42  {
   15.43 -	return (u8)(irq_info[irq] >> 16);
   15.44 +	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
   15.45  }
   15.46  
   15.47  static inline unsigned int type_from_irq(int irq)
   15.48  {
   15.49 -	return (u8)(irq_info[irq] >> 24);
   15.50 +	return irq_info[irq] >> (32 - _IRQT_BITS);
   15.51  }
   15.52  
   15.53  /* IRQ <-> VIRQ mapping. */
   15.54 @@ -305,13 +319,11 @@ asmlinkage void evtchn_do_upcall(struct 
   15.55  static int find_unbound_irq(void)
   15.56  {
   15.57  	static int warned;
   15.58 -	int dynirq, irq;
   15.59 +	int irq;
   15.60  
   15.61 -	for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
   15.62 -		irq = dynirq_to_irq(dynirq);
   15.63 +	for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
   15.64  		if (irq_bindcount[irq] == 0)
   15.65  			return irq;
   15.66 -	}
   15.67  
   15.68  	if (!warned) {
   15.69  		warned = 1;
   15.70 @@ -742,22 +754,78 @@ static struct hw_interrupt_type dynirq_t
   15.71  	.retrigger = resend_irq_on_evtchn,
   15.72  };
   15.73  
   15.74 -static inline void pirq_unmask_notify(int pirq)
   15.75 +void evtchn_register_pirq(int irq)
   15.76  {
   15.77 -	struct physdev_eoi eoi = { .irq = pirq };
   15.78 -	if (unlikely(test_bit(pirq, pirq_needs_eoi)))
   15.79 +	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
   15.80 +}
   15.81 +
   15.82 +#if defined(CONFIG_X86_IO_APIC)
   15.83 +#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
   15.84 +#elif defined(CONFIG_X86)
   15.85 +#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < 16)
   15.86 +#else
   15.87 +#define identity_mapped_irq(irq) (1)
   15.88 +#endif
   15.89 +
   15.90 +int evtchn_map_pirq(int irq, int xen_pirq)
   15.91 +{
   15.92 +	if (irq < 0) {
   15.93 +		static DEFINE_SPINLOCK(irq_alloc_lock);
   15.94 +
   15.95 +		irq = PIRQ_BASE + NR_PIRQS - 1;
   15.96 +		spin_lock(&irq_alloc_lock);
   15.97 +		do {
   15.98 +			if (identity_mapped_irq(irq))
   15.99 +				continue;
  15.100 +			if (!index_from_irq(irq)) {
  15.101 +				BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
  15.102 +				irq_info[irq] = mk_irq_info(IRQT_PIRQ,
  15.103 +							    xen_pirq, 0);
  15.104 +				break;
  15.105 +			}
  15.106 +		} while (--irq >= PIRQ_BASE);
  15.107 +		spin_unlock(&irq_alloc_lock);
  15.108 +		if (irq < PIRQ_BASE)
  15.109 +			return -ENOSPC;
  15.110 +	} else if (!xen_pirq) {
  15.111 +		if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
  15.112 +			return -EINVAL;
  15.113 +		irq_info[irq] = IRQ_UNBOUND;
  15.114 +		return 0;
  15.115 +	} else if (type_from_irq(irq) != IRQT_PIRQ
  15.116 +		   || index_from_irq(irq) != xen_pirq) {
  15.117 +		printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
  15.118 +				"cannot map to PIRQ#%u\n",
  15.119 +		       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
  15.120 +		return -EINVAL;
  15.121 +	}
  15.122 +	return index_from_irq(irq) ? irq : -EINVAL;
  15.123 +}
  15.124 +
  15.125 +int evtchn_get_xen_pirq(int irq)
  15.126 +{
  15.127 +	if (identity_mapped_irq(irq))
  15.128 +		return irq;
  15.129 +	BUG_ON(type_from_irq(irq) != IRQT_PIRQ);
  15.130 +	return index_from_irq(irq);
  15.131 +}
  15.132 +
  15.133 +static inline void pirq_unmask_notify(int irq)
  15.134 +{
  15.135 +	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
  15.136 +	if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
  15.137  		VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
  15.138  }
  15.139  
  15.140 -static inline void pirq_query_unmask(int pirq)
  15.141 +static inline void pirq_query_unmask(int irq)
  15.142  {
  15.143  	struct physdev_irq_status_query irq_status;
  15.144 -	irq_status.irq = pirq;
  15.145 +	irq_status.irq = evtchn_get_xen_pirq(irq);
  15.146  	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
  15.147  		irq_status.flags = 0;
  15.148 -	clear_bit(pirq, pirq_needs_eoi);
  15.149 +	clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
  15.150  	if (irq_status.flags & XENIRQSTAT_needs_eoi)
  15.151 -		set_bit(pirq, pirq_needs_eoi);
  15.152 +		set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
  15.153  }
  15.154  
  15.155  /*
  15.156 @@ -774,7 +842,7 @@ static unsigned int startup_pirq(unsigne
  15.157  	if (VALID_EVTCHN(evtchn))
  15.158  		goto out;
  15.159  
  15.160 -	bind_pirq.pirq  = irq;
  15.161 +	bind_pirq.pirq = evtchn_get_xen_pirq(irq);
  15.162  	/* NB. We are happy to share unless we are probing. */
  15.163  	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
  15.164  	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
  15.165 @@ -785,15 +853,15 @@ static unsigned int startup_pirq(unsigne
  15.166  	}
  15.167  	evtchn = bind_pirq.port;
  15.168  
  15.169 -	pirq_query_unmask(irq_to_pirq(irq));
  15.170 +	pirq_query_unmask(irq);
  15.171  
  15.172  	evtchn_to_irq[evtchn] = irq;
  15.173  	bind_evtchn_to_cpu(evtchn, 0);
  15.174 -	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
  15.175 +	irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
  15.176  
  15.177   out:
  15.178  	unmask_evtchn(evtchn);
  15.179 -	pirq_unmask_notify(irq_to_pirq(irq));
  15.180 +	pirq_unmask_notify(irq);
  15.181  
  15.182  	return 0;
  15.183  }
  15.184 @@ -814,7 +882,7 @@ static void shutdown_pirq(unsigned int i
  15.185  
  15.186  	bind_evtchn_to_cpu(evtchn, 0);
  15.187  	evtchn_to_irq[evtchn] = -1;
  15.188 -	irq_info[irq] = IRQ_UNBOUND;
  15.189 +	irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
  15.190  }
  15.191  
  15.192  static void enable_pirq(unsigned int irq)
  15.193 @@ -847,7 +915,7 @@ static void end_pirq(unsigned int irq)
  15.194  		shutdown_pirq(irq);
  15.195  	} else if (VALID_EVTCHN(evtchn)) {
  15.196  		unmask_evtchn(evtchn);
  15.197 -		pirq_unmask_notify(irq_to_pirq(irq));
  15.198 +		pirq_unmask_notify(irq);
  15.199  	}
  15.200  }
  15.201  
  15.202 @@ -994,7 +1062,7 @@ static void restore_cpu_ipis(unsigned in
  15.203  
  15.204  void irq_resume(void)
  15.205  {
  15.206 -	unsigned int cpu, pirq, irq, evtchn;
  15.207 +	unsigned int cpu, irq, evtchn;
  15.208  
  15.209  	init_evtchn_cpu_bindings();
  15.210  
  15.211 @@ -1003,12 +1071,12 @@ void irq_resume(void)
  15.212  		mask_evtchn(evtchn);
  15.213  
  15.214  	/* Check that no PIRQs are still bound. */
  15.215 -	for (pirq = 0; pirq < NR_PIRQS; pirq++)
  15.216 -		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
  15.217 +	for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
  15.218 +		BUG_ON(irq_info[irq] != IRQ_UNBOUND);
  15.219  
  15.220  	/* No IRQ <-> event-channel mappings. */
  15.221  	for (irq = 0; irq < NR_IRQS; irq++)
  15.222 -		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
  15.223 +		irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
  15.224  	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
  15.225  		evtchn_to_irq[evtchn] = -1;
  15.226  
  15.227 @@ -1034,28 +1102,29 @@ void __init xen_init_IRQ(void)
  15.228  		irq_info[i] = IRQ_UNBOUND;
  15.229  
  15.230  	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  15.231 -	for (i = 0; i < NR_DYNIRQS; i++) {
  15.232 -		irq_bindcount[dynirq_to_irq(i)] = 0;
  15.233 +	for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
  15.234 +		irq_bindcount[i] = 0;
  15.235  
  15.236 -		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
  15.237 -		irq_desc[dynirq_to_irq(i)].action = NULL;
  15.238 -		irq_desc[dynirq_to_irq(i)].depth = 1;
  15.239 -		irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
  15.240 +		irq_desc[i].status = IRQ_DISABLED;
  15.241 +		irq_desc[i].action = NULL;
  15.242 +		irq_desc[i].depth = 1;
  15.243 +		irq_desc[i].chip = &dynirq_type;
  15.244  	}
  15.245  
  15.246  	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  15.247 -	for (i = 0; i < NR_PIRQS; i++) {
  15.248 -		irq_bindcount[pirq_to_irq(i)] = 1;
  15.249 +	for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) {
  15.250 +		irq_bindcount[i] = 1;
  15.251  
  15.252  #ifdef RTC_IRQ
  15.253  		/* If not domain 0, force our RTC driver to fail its probe. */
  15.254 -		if ((i == RTC_IRQ) && !is_initial_xendomain())
  15.255 +		if (identity_mapped_irq(i) && ((i - PIRQ_BASE) == RTC_IRQ)
  15.256 +		    && !is_initial_xendomain())
  15.257  			continue;
  15.258  #endif
  15.259  
  15.260 -		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
  15.261 -		irq_desc[pirq_to_irq(i)].action = NULL;
  15.262 -		irq_desc[pirq_to_irq(i)].depth = 1;
  15.263 -		irq_desc[pirq_to_irq(i)].chip = &pirq_type;
  15.264 +		irq_desc[i].status = IRQ_DISABLED;
  15.265 +		irq_desc[i].action = NULL;
  15.266 +		irq_desc[i].depth = 1;
  15.267 +		irq_desc[i].chip = &pirq_type;
  15.268  	}
  15.269  }
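
The irq_info repacking above trades field widths: the index field grows from 8 to 16 bits so Xen PIRQ numbers above 255 fit, while the event-channel field shrinks from 16 to 12 bits, which the new BUILD_BUG_ON()s verify is still enough for NR_EVENT_CHANNELS. A worked example of the resulting 4/16/12 layout:

	/* 32-bit irq_info word after this change:
	 *   [31:28] type    [27:12] index    [11:0] evtchn
	 *
	 * Example: a PIRQ entry for Xen pirq 300 bound to event channel 5. */
	u32 info = mk_irq_info(IRQT_PIRQ, 300, 5);
	/* info == (IRQT_PIRQ << 28) | (300 << 12) | 5        */
	/* type decodes as   info >> 28            == IRQT_PIRQ */
	/* index decodes as  (info >> 12) & 0xffff == 300       */
	/* evtchn decodes as info & 0xfff          == 5         */
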
    16.1 --- a/drivers/xen/pciback/conf_space_capability_msi.c	Fri Sep 12 11:28:00 2008 +0900
    16.2 +++ b/drivers/xen/pciback/conf_space_capability_msi.c	Tue Sep 16 21:25:54 2008 +0900
    16.3 @@ -2,6 +2,7 @@
    16.4   * PCI Backend -- Configuration overlay for MSI capability
    16.5   */
    16.6  #include <linux/pci.h>
    16.7 +#include <linux/slab.h>
    16.8  #include "conf_space.h"
    16.9  #include "conf_space_capability.h"
   16.10  #include <xen/interface/io/pciif.h>
   16.11 @@ -37,23 +38,32 @@ int pciback_disable_msi(struct pciback_d
   16.12  int pciback_enable_msix(struct pciback_device *pdev,
   16.13  		struct pci_dev *dev, struct xen_pci_op *op)
   16.14  {
   16.15 -	int result;
   16.16 +	int i, result;
   16.17 +	struct msix_entry *entries;
   16.18  
   16.19  	if (op->value > SH_INFO_MAX_VEC)
   16.20  		return -EINVAL;
   16.21 -	else {
   16.22 -		struct msix_entry entries[op->value];
   16.23 -		int i;
   16.24  
   16.25 -		for (i = 0; i < op->value; i++) {
   16.26 -			entries[i].entry = op->msix_entries[i].entry;
   16.27 -			entries[i].vector = op->msix_entries[i].vector;
   16.28 -		}
   16.29 +	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
   16.30 +	if (entries == NULL)
   16.31 +		return -ENOMEM;
   16.32  
   16.33 -		result = pci_enable_msix(dev, entries, op->value);
   16.34 -		op->value = result;
   16.35 +	for (i = 0; i < op->value; i++) {
   16.36 +		entries[i].entry = op->msix_entries[i].entry;
   16.37 +		entries[i].vector = op->msix_entries[i].vector;
   16.38  	}
   16.39  
   16.40 +	result = pci_enable_msix(dev, entries, op->value);
   16.41 +
   16.42 +	for (i = 0; i < op->value; i++) {
   16.43 +		op->msix_entries[i].entry = entries[i].entry;
   16.44 +		op->msix_entries[i].vector = entries[i].vector;
   16.45 +	}
   16.46 +
   16.47 +	kfree(entries);
   16.48 +
   16.49 +	op->value = result;
   16.50 +
   16.51  	return result;
   16.52  }
   16.53  
    17.1 --- a/drivers/xen/xenoprof/xenoprofile.c	Fri Sep 12 11:28:00 2008 +0900
    17.2 +++ b/drivers/xen/xenoprof/xenoprofile.c	Tue Sep 16 21:25:54 2008 +0900
    17.3 @@ -35,14 +35,14 @@
    17.4  #define MAX_XENOPROF_SAMPLES 16
    17.5  
    17.6  /* sample buffers shared with Xen */
    17.7 -xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
    17.8 +static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
    17.9  /* Shared buffer area */
   17.10 -struct xenoprof_shared_buffer shared_buffer;
   17.11 +static struct xenoprof_shared_buffer shared_buffer;
   17.12  
   17.13  /* Passive sample buffers shared with Xen */
   17.14 -xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
   17.15 +static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
   17.16  /* Passive shared buffer area */
   17.17 -struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
   17.18 +static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
   17.19  
   17.20  static int xenoprof_start(void);
   17.21  static void xenoprof_stop(void);
   17.22 @@ -54,11 +54,11 @@ static int active_defined;
   17.23  extern unsigned long backtrace_depth;
   17.24  
   17.25  /* Number of buffers in shared area (one per VCPU) */
   17.26 -int nbuf;
   17.27 +static int nbuf;
   17.28  /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
   17.29 -int ovf_irq[NR_CPUS];
   17.30 +static int ovf_irq[NR_CPUS];
   17.31  /* cpu model type string - copied from Xen on XENOPROF_init command */
   17.32 -char cpu_type[XENOPROF_CPU_TYPE_SIZE];
   17.33 +static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
   17.34  
   17.35  #ifdef CONFIG_PM
   17.36  
   17.37 @@ -111,11 +111,11 @@ static void exit_driverfs(void)
   17.38  #define exit_driverfs() do { } while (0)
   17.39  #endif /* CONFIG_PM */
   17.40  
   17.41 -unsigned long long oprofile_samples = 0;
   17.42 -unsigned long long p_oprofile_samples = 0;
   17.43 +static unsigned long long oprofile_samples;
   17.44 +static unsigned long long p_oprofile_samples;
   17.45  
   17.46 -unsigned int pdomains;
   17.47 -struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
   17.48 +static unsigned int pdomains;
   17.49 +static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
   17.50  
   17.51  /* Check whether the given entry is an escape code */
   17.52  static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
   17.53 @@ -483,8 +483,7 @@ static void xenoprof_dummy_backtrace(str
   17.54  }
   17.55  
   17.56  
   17.57 -
   17.58 -struct oprofile_operations xenoprof_ops = {
   17.59 +static struct oprofile_operations xenoprof_ops = {
   17.60  #ifdef HAVE_XENOPROF_CREATE_FILES
   17.61  	.create_files 	= xenoprof_create_files,
   17.62  #endif
    18.1 --- a/include/acpi/processor.h	Fri Sep 12 11:28:00 2008 +0900
    18.2 +++ b/include/acpi/processor.h	Tue Sep 16 21:25:54 2008 +0900
    18.3 @@ -18,9 +18,12 @@
    18.4  
    18.5  #define ACPI_PDC_REVISION_ID		0x1
    18.6  
    18.7 -#define ACPI_PSD_REV0_REVISION		0 /* Support for _PSD as in ACPI 3.0 */
    18.8 +#define ACPI_PSD_REV0_REVISION		0	/* Support for _PSD as in ACPI 3.0 */
    18.9  #define ACPI_PSD_REV0_ENTRIES		5
   18.10  
   18.11 +#define ACPI_TSD_REV0_REVISION		0	/* Support for _TSD as in ACPI 3.0 */
   18.12 +#define ACPI_TSD_REV0_ENTRIES		5
   18.13 +
   18.14  #ifdef CONFIG_XEN
   18.15  #define NR_ACPI_CPUS			(NR_CPUS < 256 ? 256 : NR_CPUS)
   18.16  #else
   18.17 @@ -142,24 +145,62 @@ struct acpi_processor_performance {
   18.18  
   18.19  /* Throttling Control */
   18.20  
   18.21 +struct acpi_tsd_package {
   18.22 +	acpi_integer num_entries;
   18.23 +	acpi_integer revision;
   18.24 +	acpi_integer domain;
   18.25 +	acpi_integer coord_type;
   18.26 +	acpi_integer num_processors;
   18.27 +} __attribute__ ((packed));
   18.28 +
   18.29 +struct acpi_ptc_register {
   18.30 +	u8 descriptor;
   18.31 +	u16 length;
   18.32 +	u8 space_id;
   18.33 +	u8 bit_width;
   18.34 +	u8 bit_offset;
   18.35 +	u8 reserved;
   18.36 +	u64 address;
   18.37 +} __attribute__ ((packed));
   18.38 +
   18.39 +struct acpi_processor_tx_tss {
   18.40 +	acpi_integer freqpercentage;	/* percent of maximum core frequency */
   18.41 +	acpi_integer power;	/* milliWatts */
   18.42 +	acpi_integer transition_latency;	/* microseconds */
   18.43 +	acpi_integer control;	/* control value */
   18.44 +	acpi_integer status;	/* success indicator */
   18.45 +};
   18.46  struct acpi_processor_tx {
   18.47  	u16 power;
   18.48  	u16 performance;
   18.49  };
   18.50  
   18.51 +struct acpi_processor;
   18.52  struct acpi_processor_throttling {
   18.53 -	int state;
   18.54 +	unsigned int state;
   18.55 +	unsigned int platform_limit;
   18.56 +	struct acpi_pct_register control_register;
   18.57 +	struct acpi_pct_register status_register;
   18.58 +	unsigned int state_count;
   18.59 +	struct acpi_processor_tx_tss *states_tss;
   18.60 +	struct acpi_tsd_package domain_info;
   18.61 +	cpumask_t shared_cpu_map;
   18.62 +	int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
   18.63 +	int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
   18.64 +					      int state);
   18.65 +
   18.66  	u32 address;
   18.67  	u8 duty_offset;
   18.68  	u8 duty_width;
   18.69 -	int state_count;
   18.70 +	u8 tsd_valid_flag;
   18.71 +	unsigned int shared_type;
   18.72  	struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING];
   18.73  };
   18.74  
   18.75  /* Limit Interface */
   18.76  
   18.77  struct acpi_processor_lx {
   18.78 -	int px;			/* performace state */
   18.79 +	int px;			/* performance state */
   18.80  	int tx;			/* throttle level */
   18.81  };
   18.82  
   18.83 @@ -186,6 +227,9 @@ struct acpi_processor {
   18.84  	u32 id;
   18.85  	u32 pblk;
   18.86  	int performance_platform_limit;
   18.87 +	int throttling_platform_limit;
   18.88 +	/* _TPC limit: 0 means throttling states 0..n are all available */
   18.89 +
   18.90  	struct acpi_processor_flags flags;
   18.91  	struct acpi_processor_power power;
   18.92  	struct acpi_processor_performance *performance;
   18.93 @@ -273,10 +317,11 @@ static inline int acpi_processor_ppc_has
   18.94  #endif				/* CONFIG_CPU_FREQ */
   18.95  
   18.96  /* in processor_throttling.c */
   18.97 +int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
   18.98  int acpi_processor_get_throttling_info(struct acpi_processor *pr);
   18.99 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
  18.100 +extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
  18.101  extern struct file_operations acpi_processor_throttling_fops;
  18.102 -
  18.103 +extern void acpi_processor_throttling_init(void);
  18.104  /* in processor_idle.c */
  18.105  int acpi_processor_power_init(struct acpi_processor *pr,
  18.106  			      struct acpi_device *device);
  18.107 @@ -326,7 +371,7 @@ struct processor_extcntl_ops {
  18.108  	/* Transfer processor PM events to external control logic */
  18.109  	int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
  18.110  	/* Notify physical processor status to external control logic */
  18.111 -	int (*hotplug)(struct acpi_processor *pr, int event);
  18.112 +	int (*hotplug)(struct acpi_processor *pr, int type);
  18.113  };
  18.114  extern const struct processor_extcntl_ops *processor_extcntl_ops;
  18.115  
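
The reworked acpi_processor_throttling above adds _PTC register descriptions, _TSS state data, _TSD domain information, and two method pointers, so the throttling path can dispatch to either the legacy FADT duty-cycle mechanism or the new control-register one. A hedged sketch of the intended indirection; the *_fadt helper names are assumptions for illustration, not declared in this header:

	/* Sketch: pick an implementation once at init time ... */
	pr->throttling.acpi_processor_get_throttling =
			acpi_processor_get_throttling_fadt;	/* assumed name */
	pr->throttling.acpi_processor_set_throttling =
			acpi_processor_set_throttling_fadt;	/* assumed name */

	/* ... and dispatch through the pointers everywhere else: */
	ret = pr->throttling.acpi_processor_get_throttling(pr);
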
    19.1 --- a/include/asm-i386/mach-xen/irq_vectors.h	Fri Sep 12 11:28:00 2008 +0900
    19.2 +++ b/include/asm-i386/mach-xen/irq_vectors.h	Tue Sep 16 21:25:54 2008 +0900
    19.3 @@ -108,7 +108,13 @@
    19.4   */
    19.5  
    19.6  #define PIRQ_BASE		0
    19.7 -#define NR_PIRQS		256
    19.8 +#if !defined(MAX_IO_APICS)
    19.9 +# define NR_PIRQS		(NR_VECTORS + 32 * NR_CPUS)
   19.10 +#elif NR_CPUS < MAX_IO_APICS
   19.11 +# define NR_PIRQS		(NR_VECTORS + 32 * NR_CPUS)
   19.12 +#else
   19.13 +# define NR_PIRQS		(NR_VECTORS + 32 * MAX_IO_APICS)
   19.14 +#endif
   19.15  
   19.16  #define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
   19.17  #define NR_DYNIRQS		256
   19.18 @@ -116,10 +122,4 @@
   19.19  #define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
   19.20  #define NR_IRQ_VECTORS		NR_IRQS
   19.21  
   19.22 -#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
   19.23 -#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
   19.24 -
   19.25 -#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
   19.26 -#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
   19.27 -
   19.28  #endif /* _ASM_IRQ_VECTORS_H */
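
NR_PIRQS therefore scales as NR_VECTORS plus 32 vectors per CPU, capped by the I/O-APIC count when that is smaller. A worked example with illustrative values from a typical i386 configuration (NR_VECTORS == 256, NR_CPUS == 8, MAX_IO_APICS == 64, so NR_CPUS < MAX_IO_APICS):

	/* NR_PIRQS     == 256 + 32 * 8          == 512
	 * DYNIRQ_BASE  == PIRQ_BASE + NR_PIRQS  == 512
	 * NR_IRQS      == NR_PIRQS + NR_DYNIRQS == 512 + 256 == 768
	 * The pirq_to_irq()/dynirq_to_irq() helpers are gone because
	 * drivers/xen/core/evtchn.c now indexes both ranges directly
	 * via PIRQ_BASE and DYNIRQ_BASE. */
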
    20.1 --- a/include/asm-ia64/irq.h	Fri Sep 12 11:28:00 2008 +0900
    20.2 +++ b/include/asm-ia64/irq.h	Tue Sep 16 21:25:54 2008 +0900
    20.3 @@ -34,12 +34,6 @@
    20.4  #define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
    20.5  #define NR_IRQ_VECTORS		NR_IRQS
    20.6  
    20.7 -#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
    20.8 -#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
    20.9 -
   20.10 -#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
   20.11 -#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
   20.12 -
   20.13  #define RESCHEDULE_VECTOR	0
   20.14  #define IPI_VECTOR		1
   20.15  #define CMCP_VECTOR		2
    21.1 --- a/include/asm-powerpc/xen/asm/hypervisor.h	Fri Sep 12 11:28:00 2008 +0900
    21.2 +++ b/include/asm-powerpc/xen/asm/hypervisor.h	Tue Sep 16 21:25:54 2008 +0900
    21.3 @@ -154,13 +154,6 @@ int direct_remap_pfn_range(struct vm_are
    21.4  
    21.5  #define NR_IRQ_VECTORS		NR_IRQS
    21.6  
    21.7 -#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
    21.8 -#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
    21.9 -
   21.10 -#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
   21.11 -#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
   21.12 -
   21.13 -
   21.14  /* END:  all of these need a new home */
   21.15  
   21.16  #if defined(CONFIG_X86_64)
    22.1 --- a/include/asm-x86_64/mach-xen/irq_vectors.h	Fri Sep 12 11:28:00 2008 +0900
    22.2 +++ b/include/asm-x86_64/mach-xen/irq_vectors.h	Tue Sep 16 21:25:54 2008 +0900
    22.3 @@ -114,10 +114,4 @@
    22.4  #define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
    22.5  #define NR_IRQ_VECTORS		NR_IRQS
    22.6  
    22.7 -#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
    22.8 -#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
    22.9 -
   22.10 -#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
   22.11 -#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
   22.12 -
   22.13  #endif /* _ASM_IRQ_VECTORS_H */
    23.1 --- a/include/xen/evtchn.h	Fri Sep 12 11:28:00 2008 +0900
    23.2 +++ b/include/xen/evtchn.h	Tue Sep 16 21:25:54 2008 +0900
    23.3 @@ -101,6 +101,13 @@ asmlinkage void evtchn_do_upcall(struct 
    23.4  /* Entry point for notifications into the userland character device. */
    23.5  void evtchn_device_upcall(int port);
    23.6  
    23.7 +/* Mark a fixed IRQ as unavailable for dynamic PIRQ allocation. */
    23.8 +void evtchn_register_pirq(int irq);
    23.9 +/* Map a Xen-supplied PIRQ to a dynamically allocated kernel IRQ. */
   23.10 +int evtchn_map_pirq(int irq, int xen_pirq);
   23.11 +/* Look up the Xen-supplied PIRQ for a dynamically allocated kernel IRQ. */
   23.12 +int evtchn_get_xen_pirq(int irq);
   23.13 +
   23.14  void mask_evtchn(int port);
   23.15  void disable_all_local_evtchn(void);
   23.16  void unmask_evtchn(int port);
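
Taken together with the evtchn.c hunks above, the contracts of the three new calls are (a summary, not normative documentation):

	/* evtchn_register_pirq(irq)       claim a fixed, identity-mapped IRQ
	 *                                 so the dynamic allocator skips it;
	 * evtchn_map_pirq(-1, xen_pirq)   allocate a kernel IRQ for a
	 *                                 Xen-assigned pirq; returns the IRQ
	 *                                 or -ENOSPC;
	 * evtchn_map_pirq(irq, 0)         release that IRQ again;
	 * evtchn_get_xen_pirq(irq)        translate a kernel IRQ back to the
	 *                                 pirq number Xen expects in
	 *                                 physdev hypercalls. */
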
    24.1 --- a/include/xen/interface/memory.h	Fri Sep 12 11:28:00 2008 +0900
    24.2 +++ b/include/xen/interface/memory.h	Tue Sep 16 21:25:54 2008 +0900
    24.3 @@ -204,6 +204,7 @@ struct xen_add_to_physmap {
    24.4      /* Source mapping space. */
    24.5  #define XENMAPSPACE_shared_info 0 /* shared info page */
    24.6  #define XENMAPSPACE_grant_table 1 /* grant table page */
    24.7 +#define XENMAPSPACE_mfn         2 /* ordinary machine frame number */
    24.8      unsigned int space;
    24.9  
   24.10      /* Index into source mapping space. */
   24.11 @@ -216,6 +217,22 @@ typedef struct xen_add_to_physmap xen_ad
   24.12  DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
   24.13  
   24.14  /*
   24.15 + * Unmaps the page appearing at a particular GPFN from the specified guest's
   24.16 + * pseudophysical address space.
   24.17 + * arg == addr of xen_remove_from_physmap_t.
   24.18 + */
   24.19 +#define XENMEM_remove_from_physmap      15
   24.20 +struct xen_remove_from_physmap {
   24.21 +    /* Which domain to change the mapping for. */
   24.22 +    domid_t domid;
   24.23 +
   24.24 +    /* GPFN of the current mapping of the page. */
   24.25 +    xen_pfn_t     gpfn;
   24.26 +};
   24.27 +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
   24.28 +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
   24.29 +
   24.30 +/*
   24.31   * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
   24.32   * code on failure. This call only works for auto-translated guests.
   24.33   */
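
A minimal sketch of invoking the XENMEM_remove_from_physmap subop added above, assuming the usual HYPERVISOR_memory_op() wrapper; gpfn is whatever page the caller wants unmapped:

	/* Sketch: drop the current physmap entry for one of our own pages. */
	static int sketch_remove_gpfn(xen_pfn_t gpfn)
	{
		struct xen_remove_from_physmap xrfp = {
			.domid = DOMID_SELF,	/* our own address space */
			.gpfn  = gpfn,		/* current GPFN of the page */
		};

		return HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrfp);
	}
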
    25.1 --- a/include/xen/interface/platform.h	Fri Sep 12 11:28:00 2008 +0900
    25.2 +++ b/include/xen/interface/platform.h	Tue Sep 16 21:25:54 2008 +0900
    25.3 @@ -97,7 +97,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_read_memty
    25.4  #define XENPF_microcode_update    35
    25.5  struct xenpf_microcode_update {
    25.6      /* IN variables. */
    25.7 -    XEN_GUEST_HANDLE(void) data;      /* Pointer to microcode data */
    25.8 +    XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */
    25.9      uint32_t length;                  /* Length of microcode data. */
   25.10  };
   25.11  typedef struct xenpf_microcode_update xenpf_microcode_update_t;
   25.12 @@ -289,7 +289,7 @@ struct xen_psd_package {
   25.13  
   25.14  struct xen_processor_performance {
   25.15      uint32_t flags;     /* flag for Px sub info type */
   25.16 -    uint32_t ppc;       /* Platform limitation on freq usage */
   25.17 +    uint32_t platform_limit;  /* Platform limitation on freq usage */
   25.18      struct xen_pct_register control_register;
   25.19      struct xen_pct_register status_register;
   25.20      uint32_t state_count;     /* total available performance states */