ia64/linux-2.6.18-xen.hg

changeset 666:c84f326ff58a

ACPI: Backport the latest T-state support from Linux upstream

The purpose is to:
- Add ACPI 3.0 _TPC/_TSS/_PTC throttling support
- Enable MSR (FixedHW) support for T-states
- Update the T-state of every affected CPU when the T-state changes (the state-selection rule is sketched below)
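
For illustration only, the following user-space sketch (not part of the patch; the type and helper names are made up) shows the state-selection rule applied by the new THROTTLING_PRECHANGE notifier: the requested T-state is raised until it satisfies the thermal, user and _TPC limits, and is then capped at the highest state reported by _TSS.

/* Hypothetical stand-alone sketch of the PRECHANGE clamping rule; not kernel code. */
#include <stdio.h>

struct tstate_limits {
	int thermal_tx;      /* minimum T-state allowed by the thermal limit */
	int user_tx;         /* minimum T-state allowed by the user limit */
	int platform_limit;  /* minimum T-state allowed by _TPC */
	int state_count;     /* number of T-states reported by _TSS */
};

static int choose_tstate(int requested, const struct tstate_limits *l)
{
	int target = requested;

	if (l->thermal_tx > target)
		target = l->thermal_tx;
	if (l->user_tx > target)
		target = l->user_tx;
	if (l->platform_limit > target)
		target = l->platform_limit;
	if (target >= l->state_count)   /* never exceed the deepest state */
		target = l->state_count - 1;
	return target;
}

int main(void)
{
	struct tstate_limits l = { 2, 0, 1, 8 };

	/* A request for T0 is raised to T2 by the thermal limit. */
	printf("T%d\n", choose_tstate(0, &l));
	return 0;
}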

The backport is based on the following Linux git commit:
commit 93811d94f7e9bcfeed7d6ba75ea5d9c80a70ab95
Date: Tue Sep 9 12:23:41 2008 -0700

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Sep 11 16:52:15 2008 +0100 (2008-09-11)
parents 748f324a4b2d
children e1e8cc97331d
files drivers/acpi/processor_core.c drivers/acpi/processor_throttling.c include/acpi/processor.h
line diff
     1.1 --- a/drivers/acpi/processor_core.c	Wed Sep 10 10:54:08 2008 +0100
     1.2 +++ b/drivers/acpi/processor_core.c	Thu Sep 11 16:52:15 2008 +0100
     1.3 @@ -67,6 +67,7 @@
     1.4  #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
     1.5  #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
     1.6  #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
     1.7 +#define ACPI_PROCESSOR_NOTIFY_THROTTLING     0x82
     1.8  
     1.9  #define ACPI_PROCESSOR_LIMIT_USER	0
    1.10  #define ACPI_PROCESSOR_LIMIT_THERMAL	1
    1.11 @@ -618,6 +619,10 @@ static void acpi_processor_notify(acpi_h
    1.12  		acpi_processor_cst_has_changed(pr);
    1.13  		acpi_bus_generate_event(device, event, 0);
    1.14  		break;
    1.15 +	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
    1.16 +		acpi_processor_tstate_has_changed(pr);
    1.17 +		acpi_bus_generate_event(device, event, 0);
    1.18 +		break;
    1.19  	default:
    1.20  		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
    1.21  				  "Unsupported event [0x%x]\n", event));
    1.22 @@ -965,6 +970,8 @@ static int __init acpi_processor_init(vo
    1.23  
    1.24  	acpi_processor_ppc_init();
    1.25  
    1.26 +	acpi_processor_throttling_init();
    1.27 +
    1.28  	return 0;
    1.29  }
    1.30  
     2.1 --- a/drivers/acpi/processor_throttling.c	Wed Sep 10 10:54:08 2008 +0100
     2.2 +++ b/drivers/acpi/processor_throttling.c	Thu Sep 11 16:52:15 2008 +0100
     2.3 @@ -29,6 +29,7 @@
     2.4  #include <linux/kernel.h>
     2.5  #include <linux/module.h>
     2.6  #include <linux/init.h>
     2.7 +#include <linux/sched.h>
     2.8  #include <linux/cpufreq.h>
     2.9  #include <linux/proc_fs.h>
    2.10  #include <linux/seq_file.h>
    2.11 @@ -41,21 +42,559 @@
    2.12  
    2.13  #define ACPI_PROCESSOR_COMPONENT        0x01000000
    2.14  #define ACPI_PROCESSOR_CLASS            "processor"
    2.15 -#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
    2.16  #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
    2.17 -ACPI_MODULE_NAME("acpi_processor")
    2.18 +ACPI_MODULE_NAME("processor_throttling");
    2.19 +
    2.20 +struct throttling_tstate {
    2.21 +	unsigned int cpu;		/* cpu nr */
    2.22 +	int target_state;		/* target T-state */
    2.23 +};
    2.24 +
    2.25 +#define THROTTLING_PRECHANGE       (1)
    2.26 +#define THROTTLING_POSTCHANGE      (2)
    2.27 +
    2.28 +static int acpi_processor_get_throttling(struct acpi_processor *pr);
    2.29 +int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
    2.30 +
    2.31 +static int acpi_processor_update_tsd_coord(void)
    2.32 +{
    2.33 +	int count, count_target;
    2.34 +	int retval = 0;
    2.35 +	unsigned int i, j;
    2.36 +	cpumask_t covered_cpus;
    2.37 +	struct acpi_processor *pr, *match_pr;
    2.38 +	struct acpi_tsd_package *pdomain, *match_pdomain;
    2.39 +	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
    2.40 +
    2.41 +	/*
    2.42 +	 * Now that we have _TSD data from all CPUs, let's set up T-state
    2.43 +	 * coordination between all CPUs.
    2.44 +	 */
    2.45 +	for_each_possible_cpu(i) {
    2.46 +		pr = processors[i];
    2.47 +		if (!pr)
    2.48 +			continue;
    2.49 +
    2.50 +		/* Basic validity check for domain info */
    2.51 +		pthrottling = &(pr->throttling);
    2.52 +
    2.53 +		/*
    2.54 +		 * If the _TSD package for one CPU is invalid, the coordination
    2.55 +		 * among all CPUs is regarded as invalid.
    2.56 +		 * Maybe it is ugly.
    2.57 +		 */
    2.58 +		if (!pthrottling->tsd_valid_flag) {
    2.59 +			retval = -EINVAL;
    2.60 +			break;
    2.61 +		}
    2.62 +	}
    2.63 +	if (retval)
    2.64 +		goto err_ret;
    2.65 +
    2.66 +	cpus_clear(covered_cpus);
    2.67 +	for_each_possible_cpu(i) {
    2.68 +		pr = processors[i];
    2.69 +		if (!pr)
    2.70 +			continue;
    2.71 +
    2.72 +		if (cpu_isset(i, covered_cpus))
    2.73 +			continue;
    2.74 +		pthrottling = &pr->throttling;
    2.75 +
    2.76 +		pdomain = &(pthrottling->domain_info);
    2.77 +		cpu_set(i, pthrottling->shared_cpu_map);
    2.78 +		cpu_set(i, covered_cpus);
    2.79 +		/*
    2.80 +		 * If the number of processors in the _TSD domain is 1, it is
    2.81 +		 * unnecessary to parse the coordination for this CPU.
    2.82 +		 */
    2.83 +		if (pdomain->num_processors <= 1)
    2.84 +			continue;
    2.85 +
    2.86 +		/* Validate the Domain info */
    2.87 +		count_target = pdomain->num_processors;
    2.88 +		count = 1;
    2.89 +
    2.90 +		for_each_possible_cpu(j) {
    2.91 +			if (i == j)
    2.92 +				continue;
    2.93 +
    2.94 +			match_pr = processors[j];
    2.95 +			if (!match_pr)
    2.96 +				continue;
    2.97 +
    2.98 +			match_pthrottling = &(match_pr->throttling);
    2.99 +			match_pdomain = &(match_pthrottling->domain_info);
   2.100 +			if (match_pdomain->domain != pdomain->domain)
   2.101 +				continue;
   2.102 +
   2.103 +			/* Here i and j are in the same domain.
   2.104 +			 * If two TSD packages have the same domain, they
   2.105 +			 * should have the same num_processors and
   2.106 +			 * coordination type. Otherwise it will be regarded
   2.107 +			 * as illegal.
   2.108 +			 */
   2.109 +			if (match_pdomain->num_processors != count_target) {
   2.110 +				retval = -EINVAL;
   2.111 +				goto err_ret;
   2.112 +			}
   2.113 +
   2.114 +			if (pdomain->coord_type != match_pdomain->coord_type) {
   2.115 +				retval = -EINVAL;
   2.116 +				goto err_ret;
   2.117 +			}
   2.118 +
   2.119 +			cpu_set(j, covered_cpus);
   2.120 +			cpu_set(j, pthrottling->shared_cpu_map);
   2.121 +			count++;
   2.122 +		}
   2.123 +		for_each_possible_cpu(j) {
   2.124 +			if (i == j)
   2.125 +				continue;
   2.126 +
   2.127 +			match_pr = processors[j];
   2.128 +			if (!match_pr)
   2.129 +				continue;
   2.130 +
   2.131 +			match_pthrottling = &(match_pr->throttling);
   2.132 +			match_pdomain = &(match_pthrottling->domain_info);
   2.133 +			if (match_pdomain->domain != pdomain->domain)
   2.134 +				continue;
   2.135 +
   2.136 +			/*
   2.137 +			 * If some CPUs have the same domain, they
   2.138 +			 * will have the same shared_cpu_map.
   2.139 +			 */
   2.140 +			match_pthrottling->shared_cpu_map =
   2.141 +				pthrottling->shared_cpu_map;
   2.142 +		}
   2.143 +	}
   2.144 +
   2.145 +err_ret:
   2.146 +	for_each_possible_cpu(i) {
   2.147 +		pr = processors[i];
   2.148 +		if (!pr)
   2.149 +			continue;
   2.150 +
   2.151 +		/*
   2.152 +		 * Assume no coordination on any error parsing domain info.
   2.153 +		 * The coordination type will be forced as SW_ALL.
   2.154 +		 */
   2.155 +		if (retval) {
   2.156 +			pthrottling = &(pr->throttling);
   2.157 +			cpus_clear(pthrottling->shared_cpu_map);
   2.158 +			cpu_set(i, pthrottling->shared_cpu_map);
   2.159 +			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
   2.160 +		}
   2.161 +	}
   2.162 +
   2.163 +	return retval;
   2.164 +}
   2.165 +
   2.166 +/*
   2.167 + * Update the T-state coordination after the _TSD
   2.168 + * data for all CPUs has been obtained.
   2.169 + */
   2.170 +void acpi_processor_throttling_init(void)
   2.171 +{
   2.172 +	if (acpi_processor_update_tsd_coord())
   2.173 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   2.174 +			"Assume no T-state coordination\n"));
   2.175 +
   2.176 +	return;
   2.177 +}
   2.178 +
   2.179 +static int acpi_processor_throttling_notifier(unsigned long event, void *data)
   2.180 +{
   2.181 +	struct throttling_tstate *p_tstate = data;
   2.182 +	struct acpi_processor *pr;
   2.183 +	unsigned int cpu;
   2.184 +	int target_state;
   2.185 +	struct acpi_processor_limit *p_limit;
   2.186 +	struct acpi_processor_throttling *p_throttling;
   2.187 +
   2.188 +	cpu = p_tstate->cpu;
   2.189 +	pr = processors[cpu];
   2.190 +	if (!pr) {
   2.191 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
   2.192 +		return 0;
   2.193 +	}
   2.194 +	if (!pr->flags.throttling) {
   2.195 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
   2.196 +				"unsupported on CPU %d\n", cpu));
   2.197 +		return 0;
   2.198 +	}
   2.199 +	target_state = p_tstate->target_state;
   2.200 +	p_throttling = &(pr->throttling);
   2.201 +	switch (event) {
   2.202 +	case THROTTLING_PRECHANGE:
   2.203 +		/*
   2.204 +		 * The prechange event is used to choose a proper T-state,
   2.205 +		 * which meets the thermal, user and _TPC limits.
   2.206 +		 */
   2.207 +		p_limit = &pr->limit;
   2.208 +		if (p_limit->thermal.tx > target_state)
   2.209 +			target_state = p_limit->thermal.tx;
   2.210 +		if (p_limit->user.tx > target_state)
   2.211 +			target_state = p_limit->user.tx;
   2.212 +		if (pr->throttling_platform_limit > target_state)
   2.213 +			target_state = pr->throttling_platform_limit;
   2.214 +		if (target_state >= p_throttling->state_count) {
   2.215 +			printk(KERN_WARNING
   2.216 +				"Exceed the limit of T-state \n");
   2.217 +			target_state = p_throttling->state_count - 1;
   2.218 +		}
   2.219 +		p_tstate->target_state = target_state;
   2.220 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
   2.221 +				"target T-state of CPU %d is T%d\n",
   2.222 +				cpu, target_state));
   2.223 +		break;
   2.224 +	case THROTTLING_POSTCHANGE:
   2.225 +		/*
   2.226 +		 * Postchange event is only used to update the
   2.227 +		 * T-state flag of acpi_processor_throttling.
   2.228 +		 */
   2.229 +		p_throttling->state = target_state;
   2.230 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
   2.231 +				"CPU %d is switched to T%d\n",
   2.232 +				cpu, target_state));
   2.233 +		break;
   2.234 +	default:
   2.235 +		printk(KERN_WARNING
   2.236 +			"Unsupported Throttling notifier event\n");
   2.237 +		break;
   2.238 +	}
   2.239 +
   2.240 +	return 0;
   2.241 +}
   2.242 +
   2.243 +/*
   2.244 + * _TPC - Throttling Present Capabilities
   2.245 + */
   2.246 +static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
   2.247 +{
   2.248 +	acpi_status status = 0;
   2.249 +	unsigned long tpc = 0;
   2.250 +
   2.251 +	if (!pr)
   2.252 +		return -EINVAL;
   2.253 +	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
   2.254 +	if (ACPI_FAILURE(status)) {
   2.255 +		if (status != AE_NOT_FOUND) {
   2.256 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
   2.257 +		}
   2.258 +		return -ENODEV;
   2.259 +	}
   2.260 +	pr->throttling_platform_limit = (int)tpc;
   2.261 +	return 0;
   2.262 +}
   2.263 +
   2.264 +int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
   2.265 +{
   2.266 +	int result = 0;
   2.267 +	int throttling_limit;
   2.268 +	int current_state;
   2.269 +	struct acpi_processor_limit *limit;
   2.270 +	int target_state;
   2.271 +
   2.272 +	result = acpi_processor_get_platform_limit(pr);
   2.273 +	if (result) {
   2.274 +		/* Throttling Limit is unsupported */
   2.275 +		return result;
   2.276 +	}
   2.277 +
   2.278 +	throttling_limit = pr->throttling_platform_limit;
   2.279 +	if (throttling_limit >= pr->throttling.state_count) {
   2.280 +		/* Incorrect Throttling Limit */
   2.281 +		return -EINVAL;
   2.282 +	}
   2.283 +
   2.284 +	current_state = pr->throttling.state;
   2.285 +	if (current_state > throttling_limit) {
   2.286 +		/*
   2.287 +		 * The current state already meets the _TPC limit.
   2.288 +		 * But it is reasonable for OSPM to change T-states
   2.289 +		 * from high to low for better performance.
   2.290 +		 * Of course the thermal and user limits still have
   2.291 +		 * to be respected.
   2.292 +		 */
   2.293 +		limit = &pr->limit;
   2.294 +		target_state = throttling_limit;
   2.295 +		if (limit->thermal.tx > target_state)
   2.296 +			target_state = limit->thermal.tx;
   2.297 +		if (limit->user.tx > target_state)
   2.298 +			target_state = limit->user.tx;
   2.299 +	} else if (current_state == throttling_limit) {
   2.300 +		/*
   2.301 +		 * Unnecessary to change the throttling state
   2.302 +		 */
   2.303 +		return 0;
   2.304 +	} else {
   2.305 +		/*
   2.306 +		 * If the current state is lower than the limit of _TPC, it
   2.307 +		 * will be forced to switch to the throttling state defined
   2.308 +		 * by throttling_platform_limit.
   2.309 +		 * Because the previous state already meets the thermal
   2.310 +		 * and user limits, it is unnecessary to check them again.
   2.311 +		 */
   2.312 +		target_state = throttling_limit;
   2.313 +	}
   2.314 +	return acpi_processor_set_throttling(pr, target_state);
   2.315 +}
   2.316 +
   2.317 +/*
   2.318 + * _PTC - Processor Throttling Control (and status) register location
   2.319 + */
   2.320 +static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
   2.321 +{
   2.322 +	int result = 0;
   2.323 +	acpi_status status = 0;
   2.324 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   2.325 +	union acpi_object *ptc = NULL;
   2.326 +	union acpi_object obj = { 0 };
   2.327 +	struct acpi_processor_throttling *throttling;
   2.328 +
   2.329 +	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
   2.330 +	if (ACPI_FAILURE(status)) {
   2.331 +		if (status != AE_NOT_FOUND) {
   2.332 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
   2.333 +		}
   2.334 +		return -ENODEV;
   2.335 +	}
   2.336 +
   2.337 +	ptc = (union acpi_object *)buffer.pointer;
   2.338 +	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
   2.339 +	    || (ptc->package.count != 2)) {
   2.340 +		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
   2.341 +		result = -EFAULT;
   2.342 +		goto end;
   2.343 +	}
   2.344 +
   2.345 +	/*
   2.346 +	 * control_register
   2.347 +	 */
   2.348 +
   2.349 +	obj = ptc->package.elements[0];
   2.350 +
   2.351 +	if ((obj.type != ACPI_TYPE_BUFFER)
   2.352 +	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
   2.353 +	    || (obj.buffer.pointer == NULL)) {
   2.354 +		printk(KERN_ERR PREFIX
   2.355 +		       "Invalid _PTC data (control_register)\n");
   2.356 +		result = -EFAULT;
   2.357 +		goto end;
   2.358 +	}
   2.359 +	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
   2.360 +	       sizeof(struct acpi_ptc_register));
   2.361 +
   2.362 +	/*
   2.363 +	 * status_register
   2.364 +	 */
   2.365 +
   2.366 +	obj = ptc->package.elements[1];
   2.367 +
   2.368 +	if ((obj.type != ACPI_TYPE_BUFFER)
   2.369 +	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
   2.370 +	    || (obj.buffer.pointer == NULL)) {
   2.371 +		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
   2.372 +		result = -EFAULT;
   2.373 +		goto end;
   2.374 +	}
   2.375 +
   2.376 +	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
   2.377 +	       sizeof(struct acpi_ptc_register));
   2.378 +
   2.379 +	throttling = &pr->throttling;
   2.380 +
   2.381 +	if ((throttling->control_register.bit_width +
   2.382 +		throttling->control_register.bit_offset) > 32) {
   2.383 +		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
   2.384 +		result = -EFAULT;
   2.385 +		goto end;
   2.386 +	}
   2.387 +
   2.388 +	if ((throttling->status_register.bit_width +
   2.389 +		throttling->status_register.bit_offset) > 32) {
   2.390 +		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
   2.391 +		result = -EFAULT;
   2.392 +		goto end;
   2.393 +	}
   2.394 +
   2.395 +      end:
   2.396 +	kfree(buffer.pointer);
   2.397 +
   2.398 +	return result;
   2.399 +}
   2.400 +
   2.401 +/*
   2.402 + * _TSS - Throttling Supported States
   2.403 + */
   2.404 +static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
   2.405 +{
   2.406 +	int result = 0;
   2.407 +	acpi_status status = AE_OK;
   2.408 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   2.409 +	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
   2.410 +	struct acpi_buffer state = { 0, NULL };
   2.411 +	union acpi_object *tss = NULL;
   2.412 +	int i;
   2.413 +
   2.414 +	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
   2.415 +	if (ACPI_FAILURE(status)) {
   2.416 +		if (status != AE_NOT_FOUND) {
   2.417 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
   2.418 +		}
   2.419 +		return -ENODEV;
   2.420 +	}
   2.421 +
   2.422 +	tss = buffer.pointer;
   2.423 +	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
   2.424 +		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
   2.425 +		result = -EFAULT;
   2.426 +		goto end;
   2.427 +	}
   2.428 +
   2.429 +	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
   2.430 +			  tss->package.count));
   2.431 +
   2.432 +	pr->throttling.state_count = tss->package.count;
   2.433 +	pr->throttling.states_tss =
   2.434 +	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
   2.435 +		    GFP_KERNEL);
   2.436 +	if (!pr->throttling.states_tss) {
   2.437 +		result = -ENOMEM;
   2.438 +		goto end;
   2.439 +	}
   2.440 +
   2.441 +	for (i = 0; i < pr->throttling.state_count; i++) {
   2.442 +
   2.443 +		struct acpi_processor_tx_tss *tx =
   2.444 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
   2.445 +						      states_tss[i]);
   2.446 +
   2.447 +		state.length = sizeof(struct acpi_processor_tx_tss);
   2.448 +		state.pointer = tx;
   2.449 +
   2.450 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
   2.451 +
   2.452 +		status = acpi_extract_package(&(tss->package.elements[i]),
   2.453 +					      &format, &state);
   2.454 +		if (ACPI_FAILURE(status)) {
   2.455 +			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
   2.456 +			result = -EFAULT;
   2.457 +			kfree(pr->throttling.states_tss);
   2.458 +			goto end;
   2.459 +		}
   2.460 +
   2.461 +		if (!tx->freqpercentage) {
   2.462 +			printk(KERN_ERR PREFIX
   2.463 +			       "Invalid _TSS data: freq is zero\n");
   2.464 +			result = -EFAULT;
   2.465 +			kfree(pr->throttling.states_tss);
   2.466 +			goto end;
   2.467 +		}
   2.468 +	}
   2.469 +
   2.470 +      end:
   2.471 +	kfree(buffer.pointer);
   2.472 +
   2.473 +	return result;
   2.474 +}
   2.475 +
   2.476 +/*
   2.477 + * _TSD - T-State Dependencies
   2.478 + */
   2.479 +static int acpi_processor_get_tsd(struct acpi_processor *pr)
   2.480 +{
   2.481 +	int result = 0;
   2.482 +	acpi_status status = AE_OK;
   2.483 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   2.484 +	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
   2.485 +	struct acpi_buffer state = { 0, NULL };
   2.486 +	union acpi_object *tsd = NULL;
   2.487 +	struct acpi_tsd_package *pdomain;
   2.488 +	struct acpi_processor_throttling *pthrottling;
   2.489 +
   2.490 +	pthrottling = &pr->throttling;
   2.491 +	pthrottling->tsd_valid_flag = 0;
   2.492 +
   2.493 +	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
   2.494 +	if (ACPI_FAILURE(status)) {
   2.495 +		if (status != AE_NOT_FOUND) {
   2.496 +			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
   2.497 +		}
   2.498 +		return -ENODEV;
   2.499 +	}
   2.500 +
   2.501 +	tsd = buffer.pointer;
   2.502 +	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
   2.503 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
   2.504 +		result = -EFAULT;
   2.505 +		goto end;
   2.506 +	}
   2.507 +
   2.508 +	if (tsd->package.count != 1) {
   2.509 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
   2.510 +		result = -EFAULT;
   2.511 +		goto end;
   2.512 +	}
   2.513 +
   2.514 +	pdomain = &(pr->throttling.domain_info);
   2.515 +
   2.516 +	state.length = sizeof(struct acpi_tsd_package);
   2.517 +	state.pointer = pdomain;
   2.518 +
   2.519 +	status = acpi_extract_package(&(tsd->package.elements[0]),
   2.520 +				      &format, &state);
   2.521 +	if (ACPI_FAILURE(status)) {
   2.522 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
   2.523 +		result = -EFAULT;
   2.524 +		goto end;
   2.525 +	}
   2.526 +
   2.527 +	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
   2.528 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
   2.529 +		result = -EFAULT;
   2.530 +		goto end;
   2.531 +	}
   2.532 +
   2.533 +	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
   2.534 +		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
   2.535 +		result = -EFAULT;
   2.536 +		goto end;
   2.537 +	}
   2.538 +
   2.539 +	pthrottling = &pr->throttling;
   2.540 +	pthrottling->tsd_valid_flag = 1;
   2.541 +	pthrottling->shared_type = pdomain->coord_type;
   2.542 +	cpu_set(pr->id, pthrottling->shared_cpu_map);
   2.543 +	/*
   2.544 +	 * If the coordination type is not defined in the ACPI spec,
   2.545 +	 * the tsd_valid_flag will be cleared and the coordination type
   2.546 +	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
   2.547 +	 */
   2.548 +	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
   2.549 +		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
   2.550 +		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
   2.551 +		pthrottling->tsd_valid_flag = 0;
   2.552 +		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
   2.553 +	}
   2.554 +
   2.555 +      end:
   2.556 +	kfree(buffer.pointer);
   2.557 +	return result;
   2.558 +}
   2.559  
   2.560  /* --------------------------------------------------------------------------
   2.561                                Throttling Control
   2.562     -------------------------------------------------------------------------- */
   2.563 -static int acpi_processor_get_throttling(struct acpi_processor *pr)
   2.564 +static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
   2.565  {
   2.566  	int state = 0;
   2.567  	u32 value = 0;
   2.568  	u32 duty_mask = 0;
   2.569  	u32 duty_value = 0;
   2.570  
   2.571 -
   2.572  	if (!pr)
   2.573  		return -EINVAL;
   2.574  
   2.575 @@ -95,13 +634,259 @@ static int acpi_processor_get_throttling
   2.576  	return 0;
   2.577  }
   2.578  
   2.579 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
   2.580 +#ifdef CONFIG_X86
   2.581 +static int acpi_throttling_rdmsr(struct acpi_processor *pr,
   2.582 +					acpi_integer * value)
   2.583 +{
   2.584 +	struct cpuinfo_x86 *c;
   2.585 +	u64 msr_high, msr_low;
   2.586 +	unsigned int cpu;
   2.587 +	u64 msr = 0;
   2.588 +	int ret = -1;
   2.589 +
   2.590 +	cpu = pr->id;
   2.591 +	c = &cpu_data[cpu];
   2.592 +
   2.593 +	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
   2.594 +		!cpu_has(c, X86_FEATURE_ACPI)) {
   2.595 +		printk(KERN_ERR PREFIX
   2.596 +			"HARDWARE addr space,NOT supported yet\n");
   2.597 +	} else {
   2.598 +		msr_low = 0;
   2.599 +		msr_high = 0;
   2.600 +		rdmsr_safe(MSR_IA32_THERM_CONTROL,
   2.601 +			(u32 *)&msr_low , (u32 *) &msr_high);
   2.602 +		msr = (msr_high << 32) | msr_low;
   2.603 +		*value = (acpi_integer) msr;
   2.604 +		ret = 0;
   2.605 +	}
   2.606 +	return ret;
   2.607 +}
   2.608 +
   2.609 +static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
   2.610 +{
   2.611 +	struct cpuinfo_x86 *c;
   2.612 +	unsigned int cpu;
   2.613 +	int ret = -1;
   2.614 +	u64 msr;
   2.615 +
   2.616 +	cpu = pr->id;
   2.617 +	c = &cpu_data[cpu];
   2.618 +
   2.619 +	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
   2.620 +		!cpu_has(c, X86_FEATURE_ACPI)) {
   2.621 +		printk(KERN_ERR PREFIX
   2.622 +			"HARDWARE addr space,NOT supported yet\n");
   2.623 +	} else {
   2.624 +		msr = value;
   2.625 +		wrmsr_safe(MSR_IA32_THERM_CONTROL,
   2.626 +			msr & 0xffffffff, msr >> 32);
   2.627 +		ret = 0;
   2.628 +	}
   2.629 +	return ret;
   2.630 +}
   2.631 +#else
   2.632 +static int acpi_throttling_rdmsr(struct acpi_processor *pr,
   2.633 +				acpi_integer * value)
   2.634 +{
   2.635 +	printk(KERN_ERR PREFIX
   2.636 +		"HARDWARE addr space,NOT supported yet\n");
   2.637 +	return -1;
   2.638 +}
   2.639 +
   2.640 +static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
   2.641 +{
   2.642 +	printk(KERN_ERR PREFIX
   2.643 +		"HARDWARE addr space,NOT supported yet\n");
   2.644 +	return -1;
   2.645 +}
   2.646 +#endif
   2.647 +
   2.648 +static int acpi_read_throttling_status(struct acpi_processor *pr,
   2.649 +					acpi_integer *value)
   2.650 +{
   2.651 +	u32 bit_width, bit_offset;
   2.652 +	u64 ptc_value;
   2.653 +	u64 ptc_mask;
   2.654 +	struct acpi_processor_throttling *throttling;
   2.655 +	int ret = -1;
   2.656 +
   2.657 +	throttling = &pr->throttling;
   2.658 +	switch (throttling->status_register.space_id) {
   2.659 +	case ACPI_ADR_SPACE_SYSTEM_IO:
   2.660 +		ptc_value = 0;
   2.661 +		bit_width = throttling->status_register.bit_width;
   2.662 +		bit_offset = throttling->status_register.bit_offset;
   2.663 +
   2.664 +		acpi_os_read_port((acpi_io_address) throttling->status_register.
   2.665 +				  address, (u32 *) &ptc_value,
   2.666 +				  (u32) (bit_width + bit_offset));
   2.667 +		ptc_mask = (1 << bit_width) - 1;
   2.668 +		*value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
   2.669 +		ret = 0;
   2.670 +		break;
   2.671 +	case ACPI_ADR_SPACE_FIXED_HARDWARE:
   2.672 +		ret = acpi_throttling_rdmsr(pr, value);
   2.673 +		break;
   2.674 +	default:
   2.675 +		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
   2.676 +		       (u32) (throttling->status_register.space_id));
   2.677 +	}
   2.678 +	return ret;
   2.679 +}
   2.680 +
   2.681 +static int acpi_write_throttling_state(struct acpi_processor *pr,
   2.682 +				acpi_integer value)
   2.683 +{
   2.684 +	u32 bit_width, bit_offset;
   2.685 +	u64 ptc_value;
   2.686 +	u64 ptc_mask;
   2.687 +	struct acpi_processor_throttling *throttling;
   2.688 +	int ret = -1;
   2.689 +
   2.690 +	throttling = &pr->throttling;
   2.691 +	switch (throttling->control_register.space_id) {
   2.692 +	case ACPI_ADR_SPACE_SYSTEM_IO:
   2.693 +		bit_width = throttling->control_register.bit_width;
   2.694 +		bit_offset = throttling->control_register.bit_offset;
   2.695 +		ptc_mask = (1 << bit_width) - 1;
   2.696 +		ptc_value = value & ptc_mask;
   2.697 +
   2.698 +		acpi_os_write_port((acpi_io_address) throttling->
   2.699 +					control_register.address,
   2.700 +					(u32) (ptc_value << bit_offset),
   2.701 +					(u32) (bit_width + bit_offset));
   2.702 +		ret = 0;
   2.703 +		break;
   2.704 +	case ACPI_ADR_SPACE_FIXED_HARDWARE:
   2.705 +		ret = acpi_throttling_wrmsr(pr, value);
   2.706 +		break;
   2.707 +	default:
   2.708 +		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
   2.709 +		       (u32) (throttling->control_register.space_id));
   2.710 +	}
   2.711 +	return ret;
   2.712 +}
   2.713 +
   2.714 +static int acpi_get_throttling_state(struct acpi_processor *pr,
   2.715 +				acpi_integer value)
   2.716 +{
   2.717 +	int i;
   2.718 +
   2.719 +	for (i = 0; i < pr->throttling.state_count; i++) {
   2.720 +		struct acpi_processor_tx_tss *tx =
   2.721 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
   2.722 +						      states_tss[i]);
   2.723 +		if (tx->control == value)
   2.724 +			break;
   2.725 +	}
   2.726 +	if (i > pr->throttling.state_count)
   2.727 +		i = -1;
   2.728 +	return i;
   2.729 +}
   2.730 +
   2.731 +static int acpi_get_throttling_value(struct acpi_processor *pr,
   2.732 +			int state, acpi_integer *value)
   2.733 +{
   2.734 +	int ret = -1;
   2.735 +
   2.736 +	if (state >= 0 && state <= pr->throttling.state_count) {
   2.737 +		struct acpi_processor_tx_tss *tx =
   2.738 +		    (struct acpi_processor_tx_tss *)&(pr->throttling.
   2.739 +						      states_tss[state]);
   2.740 +		*value = tx->control;
   2.741 +		ret = 0;
   2.742 +	}
   2.743 +	return ret;
   2.744 +}
   2.745 +
   2.746 +static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
   2.747 +{
   2.748 +	int state = 0;
   2.749 +	int ret;
   2.750 +	acpi_integer value;
   2.751 +
   2.752 +	if (!pr)
   2.753 +		return -EINVAL;
   2.754 +
   2.755 +	if (!pr->flags.throttling)
   2.756 +		return -ENODEV;
   2.757 +
   2.758 +	pr->throttling.state = 0;
   2.759 +
   2.760 +	value = 0;
   2.761 +	ret = acpi_read_throttling_status(pr, &value);
   2.762 +	if (ret >= 0) {
   2.763 +		state = acpi_get_throttling_state(pr, value);
   2.764 +		pr->throttling.state = state;
   2.765 +	}
   2.766 +
   2.767 +	return 0;
   2.768 +}
   2.769 +
   2.770 +static int acpi_processor_get_throttling(struct acpi_processor *pr)
   2.771 +{
   2.772 +	cpumask_t saved_mask;
   2.773 +	int ret;
   2.774 +
   2.775 +	if (!pr)
   2.776 +		return -EINVAL;
   2.777 +
   2.778 +	if (!pr->flags.throttling)
   2.779 +		return -ENODEV;
   2.780 +	/*
   2.781 +	 * Migrate the task to the CPU indicated by pr->id.
   2.782 +	 */
   2.783 +	saved_mask = current->cpus_allowed;
   2.784 +	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
   2.785 +	ret = pr->throttling.acpi_processor_get_throttling(pr);
   2.786 +	/* restore the previous state */
   2.787 +	set_cpus_allowed(current, saved_mask);
   2.788 +
   2.789 +	return ret;
   2.790 +}
   2.791 +
   2.792 +static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
   2.793 +{
   2.794 +	int i, step;
   2.795 +
   2.796 +	if (!pr->throttling.address) {
   2.797 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
   2.798 +		return -EINVAL;
   2.799 +	} else if (!pr->throttling.duty_width) {
   2.800 +		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
   2.801 +		return -EINVAL;
   2.802 +	}
   2.803 +	/* TBD: Support duty_cycle values that span bit 4. */
   2.804 +	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
   2.805 +		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
   2.806 +		return -EINVAL;
   2.807 +	}
   2.808 +
   2.809 +	pr->throttling.state_count = 1 << acpi_fadt.duty_width;
   2.810 +
   2.811 +	/*
   2.812 +	 * Compute state values. Note that throttling displays a linear power
   2.813 +	 * performance relationship (at 50% performance the CPU will consume
   2.814 +	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
   2.815 +	 */
   2.816 +
   2.817 +	step = (1000 / pr->throttling.state_count);
   2.818 +
   2.819 +	for (i = 0; i < pr->throttling.state_count; i++) {
   2.820 +		pr->throttling.states[i].performance = 1000 - step * i;
   2.821 +		pr->throttling.states[i].power = 1000 - step * i;
   2.822 +	}
   2.823 +	return 0;
   2.824 +}
   2.825 +
   2.826 +static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
   2.827 +					      int state)
   2.828  {
   2.829  	u32 value = 0;
   2.830  	u32 duty_mask = 0;
   2.831  	u32 duty_value = 0;
   2.832  
   2.833 -
   2.834  	if (!pr)
   2.835  		return -EINVAL;
   2.836  
   2.837 @@ -114,6 +899,8 @@ int acpi_processor_set_throttling(struct
   2.838  	if (state == pr->throttling.state)
   2.839  		return 0;
   2.840  
   2.841 +	if (state < pr->throttling_platform_limit)
   2.842 +		return -EPERM;
   2.843  	/*
   2.844  	 * Calculate the duty_value and duty_mask.
   2.845  	 */
   2.846 @@ -166,12 +953,135 @@ int acpi_processor_set_throttling(struct
   2.847  	return 0;
   2.848  }
   2.849  
   2.850 +static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
   2.851 +					     int state)
   2.852 +{
   2.853 +	int ret;
   2.854 +	acpi_integer value;
   2.855 +
   2.856 +	if (!pr)
   2.857 +		return -EINVAL;
   2.858 +
   2.859 +	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
   2.860 +		return -EINVAL;
   2.861 +
   2.862 +	if (!pr->flags.throttling)
   2.863 +		return -ENODEV;
   2.864 +
   2.865 +	if (state == pr->throttling.state)
   2.866 +		return 0;
   2.867 +
   2.868 +	if (state < pr->throttling_platform_limit)
   2.869 +		return -EPERM;
   2.870 +
   2.871 +	value = 0;
   2.872 +	ret = acpi_get_throttling_value(pr, state, &value);
   2.873 +	if (ret >= 0) {
   2.874 +		acpi_write_throttling_state(pr, value);
   2.875 +		pr->throttling.state = state;
   2.876 +	}
   2.877 +
   2.878 +	return 0;
   2.879 +}
   2.880 +
   2.881 +int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
   2.882 +{
   2.883 +	cpumask_t saved_mask;
   2.884 +	int ret = 0;
   2.885 +	unsigned int i;
   2.886 +	struct acpi_processor *match_pr;
   2.887 +	struct acpi_processor_throttling *p_throttling;
   2.888 +	struct throttling_tstate t_state;
   2.889 +	cpumask_t online_throttling_cpus;
   2.890 +
   2.891 +	if (!pr)
   2.892 +		return -EINVAL;
   2.893 +
   2.894 +	if (!pr->flags.throttling)
   2.895 +		return -ENODEV;
   2.896 +
   2.897 +	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
   2.898 +		return -EINVAL;
   2.899 +
   2.900 +	saved_mask = current->cpus_allowed;
   2.901 +	t_state.target_state = state;
   2.902 +	p_throttling = &(pr->throttling);
   2.903 +	cpus_and(online_throttling_cpus, cpu_online_map,
   2.904 +			p_throttling->shared_cpu_map);
   2.905 +	/*
   2.906 +	 * The throttling notifier will be called for every
   2.907 +	 * affected CPU in order to choose a proper T-state.
   2.908 +	 * The notifier event is THROTTLING_PRECHANGE.
   2.909 +	 */
   2.910 +	for_each_cpu_mask(i, online_throttling_cpus) {
   2.911 +		t_state.cpu = i;
   2.912 +		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
   2.913 +							&t_state);
   2.914 +	}
   2.915 +	/*
   2.916 +	 * The acpi_processor_set_throttling handler will be called
   2.917 +	 * to switch the T-state. If the coordination type is SW_ALL
   2.918 +	 * or HW_ALL, it must be called for every affected CPU.
   2.919 +	 * Otherwise it can be called only for the CPU indicated by pr.
   2.920 +	 */
   2.921 +	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
   2.922 +		set_cpus_allowed(current, cpumask_of_cpu(pr->id));
   2.923 +		ret = p_throttling->acpi_processor_set_throttling(pr,
   2.924 +						t_state.target_state);
   2.925 +	} else {
   2.926 +		/*
   2.927 +		 * When the T-state coordination is SW_ALL or HW_ALL,
   2.928 +		 * it is necessary to set the T-state for every
   2.929 +		 * affected CPU.
   2.930 +		 */
   2.931 +		for_each_cpu_mask(i, online_throttling_cpus) {
   2.932 +			match_pr = processors[i];
   2.933 +			/*
   2.934 +			 * If the pointer is invalid, we will report the
   2.935 +			 * error message and continue.
   2.936 +			 */
   2.937 +			if (!match_pr) {
   2.938 +				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   2.939 +					"Invalid Pointer for CPU %d\n", i));
   2.940 +				continue;
   2.941 +			}
   2.942 +			/*
   2.943 +			 * If the throttling control is unsupported on CPU i,
   2.944 +			 * we will report the error message and continue.
   2.945 +			 */
   2.946 +			if (!match_pr->flags.throttling) {
   2.947 +				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   2.948 +					"Throttling Controll is unsupported "
   2.949 +					"on CPU %d\n", i));
   2.950 +				continue;
   2.951 +			}
   2.952 +			t_state.cpu = i;
   2.953 +			set_cpus_allowed(current, cpumask_of_cpu(i));
   2.954 +			ret = match_pr->throttling.
   2.955 +				acpi_processor_set_throttling(
   2.956 +				match_pr, t_state.target_state);
   2.957 +		}
   2.958 +	}
   2.959 +	/*
   2.960 +	 * After set_throttling is called, the throttling
   2.961 +	 * notifier is called for every affected CPU to
   2.962 +	 * update the T-state.
   2.963 +	 * The notifier event is THROTTLING_POSTCHANGE.
   2.964 +	 */
   2.965 +	for_each_cpu_mask(i, online_throttling_cpus) {
   2.966 +		t_state.cpu = i;
   2.967 +		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
   2.968 +							&t_state);
   2.969 +	}
   2.970 +	/* restore the previous state */
   2.971 +	set_cpus_allowed(current, saved_mask);
   2.972 +	return ret;
   2.973 +}
   2.974 +
   2.975  int acpi_processor_get_throttling_info(struct acpi_processor *pr)
   2.976  {
   2.977  	int result = 0;
   2.978 -	int step = 0;
   2.979 -	int i = 0;
   2.980 -
   2.981 +	struct acpi_processor_throttling *pthrottling;
   2.982  
   2.983  	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   2.984  			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
   2.985 @@ -182,19 +1092,36 @@ int acpi_processor_get_throttling_info(s
   2.986  	if (!pr)
   2.987  		return -EINVAL;
   2.988  
   2.989 -	/* TBD: Support ACPI 2.0 objects */
   2.990 +	/*
   2.991 +	 * Evaluate _PTC, _TSS and _TPC
   2.992 +	 * They must all be present or none of them can be used.
   2.993 +	 */
   2.994 +	if (acpi_processor_get_throttling_control(pr) ||
   2.995 +		acpi_processor_get_throttling_states(pr) ||
   2.996 +		acpi_processor_get_platform_limit(pr))
   2.997 +	{
   2.998 +		pr->throttling.acpi_processor_get_throttling =
   2.999 +		    &acpi_processor_get_throttling_fadt;
  2.1000 +		pr->throttling.acpi_processor_set_throttling =
  2.1001 +		    &acpi_processor_set_throttling_fadt;
  2.1002 +		if (acpi_processor_get_fadt_info(pr))
  2.1003 +			return 0;
  2.1004 +	} else {
  2.1005 +		pr->throttling.acpi_processor_get_throttling =
  2.1006 +		    &acpi_processor_get_throttling_ptc;
  2.1007 +		pr->throttling.acpi_processor_set_throttling =
  2.1008 +		    &acpi_processor_set_throttling_ptc;
  2.1009 +	}
  2.1010  
  2.1011 -	if (!pr->throttling.address) {
  2.1012 -		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
  2.1013 -		return 0;
  2.1014 -	} else if (!pr->throttling.duty_width) {
  2.1015 -		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
  2.1016 -		return 0;
  2.1017 -	}
  2.1018 -	/* TBD: Support duty_cycle values that span bit 4. */
  2.1019 -	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
  2.1020 -		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
  2.1021 -		return 0;
  2.1022 +	/*
  2.1023 +	 * If the _TSD package for one CPU can't be parsed successfully, it means
  2.1024 +	 * that this CPU will have no coordination with other CPUs.
  2.1025 +	 */
  2.1026 +	if (acpi_processor_get_tsd(pr)) {
  2.1027 +		pthrottling = &pr->throttling;
  2.1028 +		pthrottling->tsd_valid_flag = 0;
  2.1029 +		cpu_set(pr->id, pthrottling->shared_cpu_map);
  2.1030 +		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
  2.1031  	}
  2.1032  
  2.1033  	/*
  2.1034 @@ -208,21 +1135,6 @@ int acpi_processor_get_throttling_info(s
  2.1035  		return 0;
  2.1036  	}
  2.1037  
  2.1038 -	pr->throttling.state_count = 1 << acpi_fadt.duty_width;
  2.1039 -
  2.1040 -	/*
  2.1041 -	 * Compute state values. Note that throttling displays a linear power/
  2.1042 -	 * performance relationship (at 50% performance the CPU will consume
  2.1043 -	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
  2.1044 -	 */
  2.1045 -
  2.1046 -	step = (1000 / pr->throttling.state_count);
  2.1047 -
  2.1048 -	for (i = 0; i < pr->throttling.state_count; i++) {
  2.1049 -		pr->throttling.states[i].performance = step * i;
  2.1050 -		pr->throttling.states[i].power = step * i;
  2.1051 -	}
  2.1052 -
  2.1053  	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  2.1054  			  pr->throttling.state_count));
  2.1055  
  2.1056 @@ -259,11 +1171,10 @@ int acpi_processor_get_throttling_info(s
  2.1057  static int acpi_processor_throttling_seq_show(struct seq_file *seq,
  2.1058  					      void *offset)
  2.1059  {
  2.1060 -	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
  2.1061 +	struct acpi_processor *pr = seq->private;
  2.1062  	int i = 0;
  2.1063  	int result = 0;
  2.1064  
  2.1065 -
  2.1066  	if (!pr)
  2.1067  		goto end;
  2.1068  
  2.1069 @@ -281,15 +1192,27 @@ static int acpi_processor_throttling_seq
  2.1070  	}
  2.1071  
  2.1072  	seq_printf(seq, "state count:             %d\n"
  2.1073 -		   "active state:            T%d\n",
  2.1074 -		   pr->throttling.state_count, pr->throttling.state);
  2.1075 +		   "active state:            T%d\n"
  2.1076 +		   "state available: T%d to T%d\n",
  2.1077 +		   pr->throttling.state_count, pr->throttling.state,
  2.1078 +		   pr->throttling_platform_limit,
  2.1079 +		   pr->throttling.state_count - 1);
  2.1080  
  2.1081  	seq_puts(seq, "states:\n");
  2.1082 -	for (i = 0; i < pr->throttling.state_count; i++)
  2.1083 -		seq_printf(seq, "   %cT%d:                  %02d%%\n",
  2.1084 -			   (i == pr->throttling.state ? '*' : ' '), i,
  2.1085 -			   (pr->throttling.states[i].performance ? pr->
  2.1086 -			    throttling.states[i].performance / 10 : 0));
  2.1087 +	if (pr->throttling.acpi_processor_get_throttling ==
  2.1088 +			acpi_processor_get_throttling_fadt) {
  2.1089 +		for (i = 0; i < pr->throttling.state_count; i++)
  2.1090 +			seq_printf(seq, "   %cT%d:                  %02d%%\n",
  2.1091 +				   (i == pr->throttling.state ? '*' : ' '), i,
  2.1092 +				   (pr->throttling.states[i].performance ? pr->
  2.1093 +				    throttling.states[i].performance / 10 : 0));
  2.1094 +	} else {
  2.1095 +		for (i = 0; i < pr->throttling.state_count; i++)
  2.1096 +			seq_printf(seq, "   %cT%d:                  %02d%%\n",
  2.1097 +				   (i == pr->throttling.state ? '*' : ' '), i,
  2.1098 +				   (int)pr->throttling.states_tss[i].
  2.1099 +				   freqpercentage);
  2.1100 +	}
  2.1101  
  2.1102        end:
  2.1103  	return 0;
  2.1104 @@ -302,15 +1225,17 @@ static int acpi_processor_throttling_ope
  2.1105  			   PDE(inode)->data);
  2.1106  }
  2.1107  
  2.1108 -static ssize_t acpi_processor_write_throttling(struct file * file,
  2.1109 +static ssize_t acpi_processor_write_throttling(struct file *file,
  2.1110  					       const char __user * buffer,
  2.1111  					       size_t count, loff_t * data)
  2.1112  {
  2.1113  	int result = 0;
  2.1114 -	struct seq_file *m = (struct seq_file *)file->private_data;
  2.1115 -	struct acpi_processor *pr = (struct acpi_processor *)m->private;
  2.1116 -	char state_string[12] = { '\0' };
  2.1117 -
  2.1118 +	struct seq_file *m = file->private_data;
  2.1119 +	struct acpi_processor *pr = m->private;
  2.1120 +	char state_string[5] = "";
  2.1121 +	char *charp = NULL;
  2.1122 +	size_t state_val = 0;
  2.1123 +	char tmpbuf[5] = "";
  2.1124  
  2.1125  	if (!pr || (count > sizeof(state_string) - 1))
  2.1126  		return -EINVAL;
  2.1127 @@ -319,10 +1244,23 @@ static ssize_t acpi_processor_write_thro
  2.1128  		return -EFAULT;
  2.1129  
  2.1130  	state_string[count] = '\0';
  2.1131 +	if ((count > 0) && (state_string[count-1] == '\n'))
  2.1132 +		state_string[count-1] = '\0';
  2.1133  
  2.1134 -	result = acpi_processor_set_throttling(pr,
  2.1135 -					       simple_strtoul(state_string,
  2.1136 -							      NULL, 0));
  2.1137 +	charp = state_string;
  2.1138 +	if ((state_string[0] == 't') || (state_string[0] == 'T'))
  2.1139 +		charp++;
  2.1140 +
  2.1141 +	state_val = simple_strtoul(charp, NULL, 0);
  2.1142 +	if (state_val >= pr->throttling.state_count)
  2.1143 +		return -EINVAL;
  2.1144 +
  2.1145 +	snprintf(tmpbuf, 5, "%zu", state_val);
  2.1146 +
  2.1147 +	if (strcmp(tmpbuf, charp) != 0)
  2.1148 +		return -EINVAL;
  2.1149 +
  2.1150 +	result = acpi_processor_set_throttling(pr, state_val);
  2.1151  	if (result)
  2.1152  		return result;
  2.1153  
  2.1154 @@ -330,6 +1268,7 @@ static ssize_t acpi_processor_write_thro
  2.1155  }
  2.1156  
  2.1157  struct file_operations acpi_processor_throttling_fops = {
  2.1158 +	.owner = THIS_MODULE,
  2.1159  	.open = acpi_processor_throttling_open_fs,
  2.1160  	.read = seq_read,
  2.1161  	.write = acpi_processor_write_throttling,
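
As a side note (not part of the changeset), the following stand-alone sketch illustrates the SYSTEM_IO decode performed by acpi_read_throttling_status() above: the raw register value is shifted right by bit_offset, masked to bit_width bits, and the result is matched against the _TSS control values to find the current T-state. The helper name and the sample _TSS control values are made up for illustration.

/* Hypothetical stand-alone sketch of the _PTC status decode; not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct tss_entry { uint64_t control; };	/* control value from one _TSS entry */

static int state_from_status(uint64_t raw, int bit_offset, int bit_width,
			     const struct tss_entry *tss, int state_count)
{
	uint64_t mask = (1ULL << bit_width) - 1;
	uint64_t value = (raw >> bit_offset) & mask;
	int i;

	for (i = 0; i < state_count; i++)
		if (tss[i].control == value)
			return i;	/* index into _TSS is the T-state */
	return -1;			/* no matching _TSS entry */
}

int main(void)
{
	/* Sample _TSS control values; real values come from the firmware tables. */
	const struct tss_entry tss[] = { { 0x00 }, { 0x0f }, { 0x0e }, { 0x0d } };

	/* A 4-bit field at offset 1: raw 0x1e decodes to control 0x0f, i.e. T1. */
	printf("T%d\n", state_from_status(0x1e, 1, 4, tss, 4));
	return 0;
}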
     3.1 --- a/include/acpi/processor.h	Wed Sep 10 10:54:08 2008 +0100
     3.2 +++ b/include/acpi/processor.h	Thu Sep 11 16:52:15 2008 +0100
     3.3 @@ -18,9 +18,12 @@
     3.4  
     3.5  #define ACPI_PDC_REVISION_ID		0x1
     3.6  
     3.7 -#define ACPI_PSD_REV0_REVISION		0 /* Support for _PSD as in ACPI 3.0 */
     3.8 +#define ACPI_PSD_REV0_REVISION		0	/* Support for _PSD as in ACPI 3.0 */
     3.9  #define ACPI_PSD_REV0_ENTRIES		5
    3.10  
    3.11 +#define ACPI_TSD_REV0_REVISION		0	/* Support for _TSD as in ACPI 3.0 */
    3.12 +#define ACPI_TSD_REV0_ENTRIES		5
    3.13 +
    3.14  #ifdef CONFIG_XEN
    3.15  #define NR_ACPI_CPUS			(NR_CPUS < 256 ? 256 : NR_CPUS)
    3.16  #else
    3.17 @@ -142,24 +145,62 @@ struct acpi_processor_performance {
    3.18  
    3.19  /* Throttling Control */
    3.20  
    3.21 +struct acpi_tsd_package {
    3.22 +	acpi_integer num_entries;
    3.23 +	acpi_integer revision;
    3.24 +	acpi_integer domain;
    3.25 +	acpi_integer coord_type;
    3.26 +	acpi_integer num_processors;
    3.27 +} __attribute__ ((packed));
    3.28 +
    3.29 +struct acpi_ptc_register {
    3.30 +	u8 descriptor;
    3.31 +	u16 length;
    3.32 +	u8 space_id;
    3.33 +	u8 bit_width;
    3.34 +	u8 bit_offset;
    3.35 +	u8 reserved;
    3.36 +	u64 address;
    3.37 +} __attribute__ ((packed));
    3.38 +
    3.39 +struct acpi_processor_tx_tss {
    3.40 +	acpi_integer freqpercentage;	/* frequency as a percentage of maximum */
    3.41 +	acpi_integer power;	/* milliWatts */
    3.42 +	acpi_integer transition_latency;	/* microseconds */
    3.43 +	acpi_integer control;	/* control value */
    3.44 +	acpi_integer status;	/* success indicator */
    3.45 +};
    3.46  struct acpi_processor_tx {
    3.47  	u16 power;
    3.48  	u16 performance;
    3.49  };
    3.50  
    3.51 +struct acpi_processor;
    3.52  struct acpi_processor_throttling {
    3.53 -	int state;
    3.54 +	unsigned int state;
    3.55 +	unsigned int platform_limit;
    3.56 +	struct acpi_pct_register control_register;
    3.57 +	struct acpi_pct_register status_register;
    3.58 +	unsigned int state_count;
    3.59 +	struct acpi_processor_tx_tss *states_tss;
    3.60 +	struct acpi_tsd_package domain_info;
    3.61 +	cpumask_t shared_cpu_map;
    3.62 +	int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
    3.63 +	int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
    3.64 +					      int state);
    3.65 +
    3.66  	u32 address;
    3.67  	u8 duty_offset;
    3.68  	u8 duty_width;
    3.69 -	int state_count;
    3.70 +	u8 tsd_valid_flag;
    3.71 +	unsigned int shared_type;
    3.72  	struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING];
    3.73  };
    3.74  
    3.75  /* Limit Interface */
    3.76  
    3.77  struct acpi_processor_lx {
    3.78 -	int px;			/* performace state */
    3.79 +	int px;			/* performance state */
    3.80  	int tx;			/* throttle level */
    3.81  };
    3.82  
    3.83 @@ -186,6 +227,9 @@ struct acpi_processor {
    3.84  	u32 id;
    3.85  	u32 pblk;
    3.86  	int performance_platform_limit;
    3.87 +	int throttling_platform_limit;
    3.88 +	/* 0 - states 0..n-th state available */
    3.89 +
    3.90  	struct acpi_processor_flags flags;
    3.91  	struct acpi_processor_power power;
    3.92  	struct acpi_processor_performance *performance;
    3.93 @@ -273,10 +317,11 @@ static inline int acpi_processor_ppc_has
    3.94  #endif				/* CONFIG_CPU_FREQ */
    3.95  
    3.96  /* in processor_throttling.c */
    3.97 +int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
    3.98  int acpi_processor_get_throttling_info(struct acpi_processor *pr);
    3.99 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
   3.100 +extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
   3.101  extern struct file_operations acpi_processor_throttling_fops;
   3.102 -
   3.103 +extern void acpi_processor_throttling_init(void);
   3.104  /* in processor_idle.c */
   3.105  int acpi_processor_power_init(struct acpi_processor *pr,
   3.106  			      struct acpi_device *device);