ia64/linux-2.6.18-xen.hg

view drivers/acpi/events/evgpeblk.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /******************************************************************************
2 *
3 * Module Name: evgpeblk - GPE block creation and initialization.
4 *
5 *****************************************************************************/
7 /*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
44 #include <acpi/acpi.h>
45 #include <acpi/acevents.h>
46 #include <acpi/acnamesp.h>
48 #define _COMPONENT ACPI_EVENTS
49 ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */

/* Namespace-walk callback: record one _Lxx/_Exx GPE method in a GPE block */
static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
			 u32 level, void *obj_desc, void **return_value);

/* Namespace-walk callback: mark GPEs referenced by a device's _PRW as WAKE */
static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *info, void **return_value);

/* Find or create the xrupt descriptor for an interrupt level */
static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
							       interrupt_number);

/* Unlink and free an xrupt descriptor (and its handler, if not SCI) */
static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

/* Append a GPE block to the list for the given interrupt level */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

/* Allocate and initialize the register_info/event_info arrays of a block */
static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
73 /*******************************************************************************
74 *
75 * FUNCTION: acpi_ev_valid_gpe_event
76 *
77 * PARAMETERS: gpe_event_info - Info for this GPE
78 *
79 * RETURN: TRUE if the gpe_event is valid
80 *
81 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
82 * Should be called only when the GPE lists are semaphore locked
83 * and not subject to change.
84 *
85 ******************************************************************************/
87 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
88 {
89 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
90 struct acpi_gpe_block_info *gpe_block;
92 ACPI_FUNCTION_ENTRY();
94 /* No need for spin lock since we are not changing any list elements */
96 /* Walk the GPE interrupt levels */
98 gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
99 while (gpe_xrupt_block) {
100 gpe_block = gpe_xrupt_block->gpe_block_list_head;
102 /* Walk the GPE blocks on this interrupt level */
104 while (gpe_block) {
105 if ((&gpe_block->event_info[0] <= gpe_event_info) &&
106 (&gpe_block->
107 event_info[((acpi_size) gpe_block->
108 register_count) * 8] >
109 gpe_event_info)) {
110 return (TRUE);
111 }
113 gpe_block = gpe_block->next;
114 }
116 gpe_xrupt_block = gpe_xrupt_block->next;
117 }
119 return (FALSE);
120 }
122 /*******************************************************************************
123 *
124 * FUNCTION: acpi_ev_walk_gpe_list
125 *
126 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
127 *
128 * RETURN: Status
129 *
130 * DESCRIPTION: Walk the GPE lists.
131 *
132 ******************************************************************************/
134 acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
135 {
136 struct acpi_gpe_block_info *gpe_block;
137 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
138 acpi_status status = AE_OK;
139 acpi_cpu_flags flags;
141 ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
143 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
145 /* Walk the interrupt level descriptor list */
147 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
148 while (gpe_xrupt_info) {
150 /* Walk all Gpe Blocks attached to this interrupt level */
152 gpe_block = gpe_xrupt_info->gpe_block_list_head;
153 while (gpe_block) {
155 /* One callback per GPE block */
157 status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
158 if (ACPI_FAILURE(status)) {
159 goto unlock_and_exit;
160 }
162 gpe_block = gpe_block->next;
163 }
165 gpe_xrupt_info = gpe_xrupt_info->next;
166 }
168 unlock_and_exit:
169 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
170 return_ACPI_STATUS(status);
171 }
173 /*******************************************************************************
174 *
175 * FUNCTION: acpi_ev_delete_gpe_handlers
176 *
177 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
178 * gpe_block - Gpe Block info
179 *
180 * RETURN: Status
181 *
182 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
183 * Used only prior to termination.
184 *
185 ******************************************************************************/
187 acpi_status
188 acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
189 struct acpi_gpe_block_info *gpe_block)
190 {
191 struct acpi_gpe_event_info *gpe_event_info;
192 acpi_native_uint i;
193 acpi_native_uint j;
195 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
197 /* Examine each GPE Register within the block */
199 for (i = 0; i < gpe_block->register_count; i++) {
201 /* Now look at the individual GPEs in this byte register */
203 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
204 gpe_event_info =
205 &gpe_block->
206 event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];
208 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
209 ACPI_GPE_DISPATCH_HANDLER) {
210 ACPI_FREE(gpe_event_info->dispatch.handler);
211 gpe_event_info->dispatch.handler = NULL;
212 gpe_event_info->flags &=
213 ~ACPI_GPE_DISPATCH_MASK;
214 }
215 }
216 }
218 return_ACPI_STATUS(AE_OK);
219 }
221 /*******************************************************************************
222 *
223 * FUNCTION: acpi_ev_save_method_info
224 *
225 * PARAMETERS: Callback from walk_namespace
226 *
227 * RETURN: Status
228 *
229 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
230 * control method under the _GPE portion of the namespace.
231 * Extract the name and GPE type from the object, saving this
232 * information for quick lookup during GPE dispatch
233 *
234 * The name of each GPE control method is of the form:
235 * "_Lxx" or "_Exx"
236 * Where:
237 * L - means that the GPE is level triggered
238 * E - means that the GPE is edge triggered
239 * xx - is the GPE number [in HEX]
240 *
241 ******************************************************************************/
static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
			 u32 level, void *obj_desc, void **return_value)
{
	/* Context passed by the namespace walk is the target GPE block */
	struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	char name[ACPI_NAME_SIZE + 1];	/* 4-char ACPI name plus NUL */
	u8 type;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_save_method_info);

	/*
	 * _Lxx and _Exx GPE method support
	 *
	 * 1) Extract the name from the object and convert to a string
	 */
	ACPI_MOVE_32_TO_32(name,
			   &((struct acpi_namespace_node *)obj_handle)->name.
			   integer);
	name[ACPI_NAME_SIZE] = 0;

	/*
	 * 2) Edge/Level determination is based on the 2nd character
	 * of the method name
	 *
	 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
	 * if a _PRW object is found that points to this GPE.
	 */
	switch (name[1]) {
	case 'L':
		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':
		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:
		/* Unknown method type, just ignore it! */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
				  name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Convert the last two characters of the name to the GPE Number */

	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
	if (gpe_number == ACPI_UINT32_MAX) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
				  name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	if ((gpe_number < gpe_block->block_base_number) ||
	    (gpe_number >=
	     (gpe_block->block_base_number +
	      (gpe_block->register_count * 8)))) {
		/*
		 * Not valid for this GPE block, just ignore it
		 * However, it may be valid for a different GPE block, since GPE0 and GPE1
		 * methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Now we can add this information to the gpe_event_info block
	 * for use during dispatch of this GPE. Default type is RUNTIME, although
	 * this may change when the _PRW methods are executed later.
	 */
	gpe_event_info =
	    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

	gpe_event_info->flags = (u8)
	    (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);

	/* Remember the method node so dispatch can execute _Lxx/_Exx later */

	gpe_event_info->dispatch.method_node =
	    (struct acpi_namespace_node *)obj_handle;

	/* Update enable mask, but don't enable the HW GPE as of yet */

	status = acpi_ev_enable_gpe(gpe_event_info, FALSE);

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(status);
}
342 /*******************************************************************************
343 *
344 * FUNCTION: acpi_ev_match_prw_and_gpe
345 *
346 * PARAMETERS: Callback from walk_namespace
347 *
348 * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
349 * not aborted on a single _PRW failure.
350 *
351 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
352 * Device. Run the _PRW method. If present, extract the GPE
353 * number and mark the GPE as a WAKE GPE.
354 *
355 ******************************************************************************/
static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *info, void **return_value)
{
	struct acpi_gpe_walk_info *gpe_info = (void *)info;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *target_gpe_device;
	struct acpi_gpe_event_info *gpe_event_info;
	union acpi_operand_object *pkg_desc;
	union acpi_operand_object *obj_desc;
	u32 gpe_number;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);

	/* Check for a _PRW method under this device */

	status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
					 ACPI_BTYPE_PACKAGE, &pkg_desc);
	if (ACPI_FAILURE(status)) {

		/* Ignore all errors from _PRW, we don't want to abort the subsystem */

		return_ACPI_STATUS(AE_OK);
	}

	/* The returned _PRW package must have at least two elements */

	if (pkg_desc->package.count < 2) {
		goto cleanup;
	}

	/* Extract pointers from the input context */

	gpe_device = gpe_info->gpe_device;
	gpe_block = gpe_info->gpe_block;

	/*
	 * The _PRW object must return a package, we are only interested
	 * in the first element
	 */
	obj_desc = pkg_desc->package.elements[0];

	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {

		/* Use FADT-defined GPE device (from definition of _PRW) */

		target_gpe_device = acpi_gbl_fadt_gpe_device;

		/* Integer is the GPE number in the FADT described GPE blocks */

		gpe_number = (u32) obj_desc->integer.value;
	} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {

		/* Package contains a GPE reference and GPE number within a GPE block */

		/* Element 0 must be a reference, element 1 an integer */

		if ((obj_desc->package.count < 2) ||
		    (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
		     ACPI_TYPE_LOCAL_REFERENCE)
		    || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
			ACPI_TYPE_INTEGER)) {
			goto cleanup;
		}

		/* Get GPE block reference and decode */

		target_gpe_device =
		    obj_desc->package.elements[0]->reference.node;
		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
	} else {
		/* Unknown type, just ignore it */

		goto cleanup;
	}

	/*
	 * Is this GPE within this block?
	 *
	 * TRUE iff these conditions are true:
	 * 1) The GPE devices match.
	 * 2) The GPE index(number) is within the range of the Gpe Block
	 * associated with the GPE device.
	 */
	if ((gpe_device == target_gpe_device) &&
	    (gpe_number >= gpe_block->block_base_number) &&
	    (gpe_number <
	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
		gpe_event_info =
		    &gpe_block->event_info[gpe_number -
					   gpe_block->block_base_number];

		/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */

		gpe_event_info->flags &=
		    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);

		status =
		    acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
		status =
		    acpi_ev_update_gpe_enable_masks(gpe_event_info,
						    ACPI_GPE_DISABLE);
	}

	/* Always drop the _PRW package; errors are deliberately not returned */

      cleanup:
	acpi_ut_remove_reference(pkg_desc);
	return_ACPI_STATUS(AE_OK);
}
469 /*******************************************************************************
470 *
471 * FUNCTION: acpi_ev_get_gpe_xrupt_block
472 *
473 * PARAMETERS: interrupt_number - Interrupt for a GPE block
474 *
475 * RETURN: A GPE interrupt block
476 *
477 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
478 * block per unique interrupt level used for GPEs.
479 * Should be called only when the GPE lists are semaphore locked
480 * and not subject to change.
481 *
482 ******************************************************************************/
static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
							       interrupt_number)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	/* Reuse an existing descriptor for this interrupt level, if any */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			return_PTR(next_gpe_xrupt);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		/* NULL signals allocation failure to the caller */

		return_PTR(NULL);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	/* Append at the tail of the doubly-linked list */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	/* The SCI interrupt handler is installed elsewhere, once */

	if (interrupt_number != acpi_gbl_FADT->sci_int) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			/*
			 * NOTE(review): on this failure the descriptor stays linked
			 * into the list but NULL is returned — confirm intended.
			 */
			ACPI_ERROR((AE_INFO,
				    "Could not install GPE interrupt handler at level 0x%X",
				    interrupt_number));
			return_PTR(NULL);
		}
	}

	return_PTR(gpe_xrupt);
}
547 /*******************************************************************************
548 *
549 * FUNCTION: acpi_ev_delete_gpe_xrupt
550 *
551 * PARAMETERS: gpe_xrupt - A GPE interrupt info block
552 *
553 * RETURN: Status
554 *
555 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
556 * interrupt handler if not the SCI interrupt.
557 *
558 ******************************************************************************/
560 static acpi_status
561 acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
562 {
563 acpi_status status;
564 acpi_cpu_flags flags;
566 ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
568 /* We never want to remove the SCI interrupt handler */
570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) {
571 gpe_xrupt->gpe_block_list_head = NULL;
572 return_ACPI_STATUS(AE_OK);
573 }
575 /* Disable this interrupt */
577 status =
578 acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
579 acpi_ev_gpe_xrupt_handler);
580 if (ACPI_FAILURE(status)) {
581 return_ACPI_STATUS(status);
582 }
584 /* Unlink the interrupt block with lock */
586 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
587 if (gpe_xrupt->previous) {
588 gpe_xrupt->previous->next = gpe_xrupt->next;
589 }
591 if (gpe_xrupt->next) {
592 gpe_xrupt->next->previous = gpe_xrupt->previous;
593 }
594 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
596 /* Free the block */
598 ACPI_FREE(gpe_xrupt);
599 return_ACPI_STATUS(AE_OK);
600 }
602 /*******************************************************************************
603 *
604 * FUNCTION: acpi_ev_install_gpe_block
605 *
606 * PARAMETERS: gpe_block - New GPE block
607 * interrupt_number - Xrupt to be associated with this GPE block
608 *
609 * RETURN: Status
610 *
611 * DESCRIPTION: Install new GPE block with mutex support
612 *
613 ******************************************************************************/
615 static acpi_status
616 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
617 u32 interrupt_number)
618 {
619 struct acpi_gpe_block_info *next_gpe_block;
620 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
621 acpi_status status;
622 acpi_cpu_flags flags;
624 ACPI_FUNCTION_TRACE(ev_install_gpe_block);
626 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
627 if (ACPI_FAILURE(status)) {
628 return_ACPI_STATUS(status);
629 }
631 gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
632 if (!gpe_xrupt_block) {
633 status = AE_NO_MEMORY;
634 goto unlock_and_exit;
635 }
637 /* Install the new block at the end of the list with lock */
639 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
640 if (gpe_xrupt_block->gpe_block_list_head) {
641 next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
642 while (next_gpe_block->next) {
643 next_gpe_block = next_gpe_block->next;
644 }
646 next_gpe_block->next = gpe_block;
647 gpe_block->previous = next_gpe_block;
648 } else {
649 gpe_xrupt_block->gpe_block_list_head = gpe_block;
650 }
652 gpe_block->xrupt_block = gpe_xrupt_block;
653 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
655 unlock_and_exit:
656 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
657 return_ACPI_STATUS(status);
658 }
660 /*******************************************************************************
661 *
662 * FUNCTION: acpi_ev_delete_gpe_block
663 *
664 * PARAMETERS: gpe_block - Existing GPE block
665 *
666 * RETURN: Status
667 *
668 * DESCRIPTION: Remove a GPE block
669 *
670 ******************************************************************************/
672 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
673 {
674 acpi_status status;
675 acpi_cpu_flags flags;
677 ACPI_FUNCTION_TRACE(ev_install_gpe_block);
679 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
680 if (ACPI_FAILURE(status)) {
681 return_ACPI_STATUS(status);
682 }
684 /* Disable all GPEs in this block */
686 status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
688 if (!gpe_block->previous && !gpe_block->next) {
690 /* This is the last gpe_block on this interrupt */
692 status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
693 if (ACPI_FAILURE(status)) {
694 goto unlock_and_exit;
695 }
696 } else {
697 /* Remove the block on this interrupt with lock */
699 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
700 if (gpe_block->previous) {
701 gpe_block->previous->next = gpe_block->next;
702 } else {
703 gpe_block->xrupt_block->gpe_block_list_head =
704 gpe_block->next;
705 }
707 if (gpe_block->next) {
708 gpe_block->next->previous = gpe_block->previous;
709 }
710 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
711 }
713 /* Free the gpe_block */
715 ACPI_FREE(gpe_block->register_info);
716 ACPI_FREE(gpe_block->event_info);
717 ACPI_FREE(gpe_block);
719 unlock_and_exit:
720 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
721 return_ACPI_STATUS(status);
722 }
724 /*******************************************************************************
725 *
726 * FUNCTION: acpi_ev_create_gpe_info_blocks
727 *
728 * PARAMETERS: gpe_block - New GPE block
729 *
730 * RETURN: Status
731 *
732 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
733 *
734 ******************************************************************************/
736 static acpi_status
737 acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
738 {
739 struct acpi_gpe_register_info *gpe_register_info = NULL;
740 struct acpi_gpe_event_info *gpe_event_info = NULL;
741 struct acpi_gpe_event_info *this_event;
742 struct acpi_gpe_register_info *this_register;
743 acpi_native_uint i;
744 acpi_native_uint j;
745 acpi_status status;
747 ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
749 /* Allocate the GPE register information block */
751 gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
752 register_count *
753 sizeof(struct
754 acpi_gpe_register_info));
755 if (!gpe_register_info) {
756 ACPI_ERROR((AE_INFO,
757 "Could not allocate the GpeRegisterInfo table"));
758 return_ACPI_STATUS(AE_NO_MEMORY);
759 }
761 /*
762 * Allocate the GPE event_info block. There are eight distinct GPEs
763 * per register. Initialization to zeros is sufficient.
764 */
765 gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
766 register_count *
767 ACPI_GPE_REGISTER_WIDTH) *
768 sizeof(struct
769 acpi_gpe_event_info));
770 if (!gpe_event_info) {
771 ACPI_ERROR((AE_INFO,
772 "Could not allocate the GpeEventInfo table"));
773 status = AE_NO_MEMORY;
774 goto error_exit;
775 }
777 /* Save the new Info arrays in the GPE block */
779 gpe_block->register_info = gpe_register_info;
780 gpe_block->event_info = gpe_event_info;
782 /*
783 * Initialize the GPE Register and Event structures. A goal of these
784 * tables is to hide the fact that there are two separate GPE register sets
785 * in a given GPE hardware block, the status registers occupy the first half,
786 * and the enable registers occupy the second half.
787 */
788 this_register = gpe_register_info;
789 this_event = gpe_event_info;
791 for (i = 0; i < gpe_block->register_count; i++) {
793 /* Init the register_info for this GPE register (8 GPEs) */
795 this_register->base_gpe_number =
796 (u8) (gpe_block->block_base_number +
797 (i * ACPI_GPE_REGISTER_WIDTH));
799 ACPI_STORE_ADDRESS(this_register->status_address.address,
800 (gpe_block->block_address.address + i));
802 ACPI_STORE_ADDRESS(this_register->enable_address.address,
803 (gpe_block->block_address.address
804 + i + gpe_block->register_count));
806 this_register->status_address.address_space_id =
807 gpe_block->block_address.address_space_id;
808 this_register->enable_address.address_space_id =
809 gpe_block->block_address.address_space_id;
810 this_register->status_address.register_bit_width =
811 ACPI_GPE_REGISTER_WIDTH;
812 this_register->enable_address.register_bit_width =
813 ACPI_GPE_REGISTER_WIDTH;
814 this_register->status_address.register_bit_offset =
815 ACPI_GPE_REGISTER_WIDTH;
816 this_register->enable_address.register_bit_offset =
817 ACPI_GPE_REGISTER_WIDTH;
819 /* Init the event_info for each GPE within this register */
821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
822 this_event->register_bit = acpi_gbl_decode_to8bit[j];
823 this_event->register_info = this_register;
824 this_event++;
825 }
827 /* Disable all GPEs within this register */
829 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
830 &this_register->
831 enable_address);
832 if (ACPI_FAILURE(status)) {
833 goto error_exit;
834 }
836 /* Clear any pending GPE events within this register */
838 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
839 &this_register->
840 status_address);
841 if (ACPI_FAILURE(status)) {
842 goto error_exit;
843 }
845 this_register++;
846 }
848 return_ACPI_STATUS(AE_OK);
850 error_exit:
851 if (gpe_register_info) {
852 ACPI_FREE(gpe_register_info);
853 }
854 if (gpe_event_info) {
855 ACPI_FREE(gpe_event_info);
856 }
858 return_ACPI_STATUS(status);
859 }
861 /*******************************************************************************
862 *
863 * FUNCTION: acpi_ev_create_gpe_block
864 *
865 * PARAMETERS: gpe_device - Handle to the parent GPE block
866 * gpe_block_address - Address and space_iD
867 * register_count - Number of GPE register pairs in the block
868 * gpe_block_base_number - Starting GPE number for the block
869 * interrupt_number - H/W interrupt for the block
870 * return_gpe_block - Where the new block descriptor is returned
871 *
872 * RETURN: Status
873 *
874 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
875 * the block are disabled at exit.
876 * Note: Assumes namespace is locked.
877 *
878 ******************************************************************************/
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	/* An empty block (e.g., no FADT GPE1 block) is not an error */

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->node = gpe_device;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Find all GPE methods (_Lxx, _Exx) for this block */

	/*
	 * NOTE(review): the status of this namespace walk is assigned but
	 * never checked; the function returns AE_OK regardless. Confirm
	 * whether walk failures should be propagated.
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_save_method_info, gpe_block,
					NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
			  (u32) gpe_block->block_base_number,
			  (u32) (gpe_block->block_base_number +
				 ((gpe_block->register_count *
				   ACPI_GPE_REGISTER_WIDTH) - 1)),
			  gpe_device->name.ascii, gpe_block->register_count,
			  interrupt_number));

	return_ACPI_STATUS(AE_OK);
}
956 /*******************************************************************************
957 *
958 * FUNCTION: acpi_ev_initialize_gpe_block
959 *
960 * PARAMETERS: gpe_device - Handle to the parent GPE block
961 * gpe_block - Gpe Block info
962 *
963 * RETURN: Status
964 *
965 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
966 * _PRT methods associated with the block, then enable the
967 * appropriate GPEs.
968 * Note: Assumes namespace is locked.
969 *
970 ******************************************************************************/
972 acpi_status
973 acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
974 struct acpi_gpe_block_info *gpe_block)
975 {
976 acpi_status status;
977 struct acpi_gpe_event_info *gpe_event_info;
978 struct acpi_gpe_walk_info gpe_info;
979 u32 wake_gpe_count;
980 u32 gpe_enabled_count;
981 acpi_native_uint i;
982 acpi_native_uint j;
984 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
986 /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
988 if (!gpe_block) {
989 return_ACPI_STATUS(AE_OK);
990 }
992 /*
993 * Runtime option: Should wake GPEs be enabled at runtime? The default
994 * is no, they should only be enabled just as the machine goes to sleep.
995 */
996 if (acpi_gbl_leave_wake_gpes_disabled) {
997 /*
998 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
999 * Each GPE that has one or more _PRWs that reference it is by
1000 * definition a wake GPE and will not be enabled while the machine
1001 * is running.
1002 */
1003 gpe_info.gpe_block = gpe_block;
1004 gpe_info.gpe_device = gpe_device;
1006 status =
1007 acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1008 ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
1009 acpi_ev_match_prw_and_gpe, &gpe_info,
1010 NULL);
1013 /*
1014 * Enable all GPEs in this block that have these attributes:
1015 * 1) are "runtime" or "run/wake" GPEs, and
1016 * 2) have a corresponding _Lxx or _Exx method
1018 * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
1019 * external interface.
1020 */
1021 wake_gpe_count = 0;
1022 gpe_enabled_count = 0;
1024 for (i = 0; i < gpe_block->register_count; i++) {
1025 for (j = 0; j < 8; j++) {
1027 /* Get the info block for this particular GPE */
1029 gpe_event_info =
1030 &gpe_block->
1031 event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];
1033 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
1034 ACPI_GPE_DISPATCH_METHOD)
1035 && (gpe_event_info->
1036 flags & ACPI_GPE_TYPE_RUNTIME)) {
1037 gpe_enabled_count++;
1040 if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
1041 wake_gpe_count++;
1046 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1047 "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
1048 wake_gpe_count, gpe_enabled_count));
1050 /* Enable all valid runtime GPEs found above */
1052 status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
1053 if (ACPI_FAILURE(status)) {
1054 ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
1055 gpe_block));
1058 return_ACPI_STATUS(status);
1061 /*******************************************************************************
1063 * FUNCTION: acpi_ev_gpe_initialize
1065 * PARAMETERS: None
1067 * RETURN: Status
1069 * DESCRIPTION: Initialize the GPE data structures
1071 ******************************************************************************/
1073 acpi_status acpi_ev_gpe_initialize(void)
1075 u32 register_count0 = 0;
1076 u32 register_count1 = 0;
1077 u32 gpe_number_max = 0;
1078 acpi_status status;
1080 ACPI_FUNCTION_TRACE(ev_gpe_initialize);
1082 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
1083 if (ACPI_FAILURE(status)) {
1084 return_ACPI_STATUS(status);
1087 /*
1088 * Initialize the GPE Block(s) defined in the FADT
1090 * Why the GPE register block lengths are divided by 2: From the ACPI Spec,
1091 * section "General-Purpose Event Registers", we have:
1093 * "Each register block contains two registers of equal length
1094 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
1095 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
1096 * The length of the GPE1_STS and GPE1_EN registers is equal to
1097 * half the GPE1_LEN. If a generic register block is not supported
1098 * then its respective block pointer and block length values in the
1099 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
1100 * to be the same size."
1101 */
1103 /*
1104 * Determine the maximum GPE number for this machine.
1106 * Note: both GPE0 and GPE1 are optional, and either can exist without
1107 * the other.
1109 * If EITHER the register length OR the block address are zero, then that
1110 * particular block is not supported.
1111 */
1112 if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
1114 /* GPE block 0 exists (has both length and address > 0) */
1116 register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
1118 gpe_number_max =
1119 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
1121 /* Install GPE Block 0 */
1123 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1124 &acpi_gbl_FADT->xgpe0_blk,
1125 register_count0, 0,
1126 acpi_gbl_FADT->sci_int,
1127 &acpi_gbl_gpe_fadt_blocks[0]);
1129 if (ACPI_FAILURE(status)) {
1130 ACPI_EXCEPTION((AE_INFO, status,
1131 "Could not create GPE Block 0"));
1135 if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
1137 /* GPE block 1 exists (has both length and address > 0) */
1139 register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
1141 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1143 if ((register_count0) &&
1144 (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
1145 ACPI_ERROR((AE_INFO,
1146 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1147 gpe_number_max, acpi_gbl_FADT->gpe1_base,
1148 acpi_gbl_FADT->gpe1_base +
1149 ((register_count1 *
1150 ACPI_GPE_REGISTER_WIDTH) - 1)));
1152 /* Ignore GPE1 block by setting the register count to zero */
1154 register_count1 = 0;
1155 } else {
1156 /* Install GPE Block 1 */
1158 status =
1159 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1160 &acpi_gbl_FADT->xgpe1_blk,
1161 register_count1,
1162 acpi_gbl_FADT->gpe1_base,
1163 acpi_gbl_FADT->sci_int,
1164 &acpi_gbl_gpe_fadt_blocks
1165 [1]);
1167 if (ACPI_FAILURE(status)) {
1168 ACPI_EXCEPTION((AE_INFO, status,
1169 "Could not create GPE Block 1"));
1172 /*
1173 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1174 * space. However, GPE0 always starts at GPE number zero.
1175 */
1176 gpe_number_max = acpi_gbl_FADT->gpe1_base +
1177 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1181 /* Exit if there are no GPE registers */
1183 if ((register_count0 + register_count1) == 0) {
1185 /* GPEs are not required by ACPI, this is OK */
1187 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1188 "There are no GPE blocks defined in the FADT\n"));
1189 status = AE_OK;
1190 goto cleanup;
1193 /* Check for Max GPE number out-of-range */
1195 if (gpe_number_max > ACPI_GPE_MAX) {
1196 ACPI_ERROR((AE_INFO,
1197 "Maximum GPE number from FADT is too large: 0x%X",
1198 gpe_number_max));
1199 status = AE_BAD_VALUE;
1200 goto cleanup;
1203 cleanup:
1204 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
1205 return_ACPI_STATUS(AE_OK);