ia64/linux-2.6.18-xen.hg

view drivers/acpi/osl.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author		Keir Fraser <keir.fraser@citrix.com>
date		Fri Jun 05 14:01:20 2009 +0100
parents		5f3c40a4c214
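The retry policy described in the changeset message can be sketched in plain C. The snippet below is a toy model, not the patch itself: current_pages, target_pages and increase_reservation() are hypothetical stand-ins for the balloon driver's state and its hypercall path, and the real driver reschedules itself from a timer rather than looping inline.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's internals; only the retry
 * policy itself comes from the changeset description above. */
static long current_pages = 800;	/* pages currently held by the guest */
static long target_pages  = 1000;	/* balloon target set by the toolstack */

/* Model a host under temporary memory pressure: each request is only
 * partially satisfied, as if other guests are still ballooning down. */
static long increase_reservation(long nr_pages)
{
	return nr_pages < 64 ? nr_pages : 64;
}

int main(void)
{
	int attempt = 0;

	/* Old behaviour: a failed or partial attempt set a "hard limit"
	 * and gave up until the target was rewritten.  New behaviour:
	 * keep whatever pages were granted and retry (in the driver,
	 * from a timer) until the target is reached. */
	while (current_pages < target_pages) {
		current_pages += increase_reservation(target_pages - current_pages);
		printf("attempt %d: %ld/%ld pages\n",
		       ++attempt, current_pages, target_pages);
	}
	return 0;
}

The point of the model is the loop invariant: partially granted pages are kept rather than returned to Xen, and only the remaining shortfall is retried until the host frees enough memory.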
line source
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>

#include <linux/efi.h>

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl")
#define PREFIX		"ACPI: "
struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif				/* ENABLE_DEBUGGER */

int acpi_specific_hotkey_enabled = TRUE;
EXPORT_SYMBOL(acpi_specific_hotkey_enabled);

static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
acpi_status acpi_os_initialize(void)
{
	return AE_OK;
}

acpi_status acpi_os_initialize1(void)
{
	/*
	 * Initialize PCI configuration space access, as we'll need to access
	 * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
	 */
	if (!raw_pci_ops) {
		printk(KERN_ERR PREFIX
		       "Access to PCI configuration space unavailable\n");
		return AE_NULL_ENTRY;
	}
	kacpid_wq = create_singlethread_workqueue("kacpid");
	BUG_ON(!kacpid_wq);

	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_irq_irq,
						 acpi_irq_handler);
	}

	destroy_workqueue(kacpid_wq);

	return AE_OK;
}
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk("%s", buffer);
	}
#else
	printk("%s", buffer);
#endif
}
acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
	if (efi_enabled) {
		addr->pointer_type = ACPI_PHYSICAL_POINTER;
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			addr->pointer.physical = efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			addr->pointer.physical = efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return AE_NOT_FOUND;
		}
	} else {
		if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return AE_NOT_FOUND;
		}
	}

	return AE_OK;
}
acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
		   void __iomem ** virt)
{
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return AE_BAD_PARAMETER;
	}
	/*
	 * ioremap checks to ensure this is in reserved space
	 */
	*virt = ioremap((unsigned long)phys, size);

	if (!*virt)
		return AE_NO_MEMORY;

	return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
	iounmap(virt);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif
#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string * new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	return AE_OK;
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
	else
		*new_table = NULL;
#else
	*new_table = NULL;
#endif
	return AE_OK;
}
static irqreturn_t acpi_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	/*
	 * Ignore the GSI from the core, and use the value in our copy of the
	 * FADT. It may not be the same if an interrupt source override exists
	 * for the SCI.
	 */
	gsi = acpi_fadt.sci_int;
	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		return AE_NOT_ACQUIRED;
	}
	acpi_irq_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq) {
		free_irq(irq, acpi_irq);
		acpi_irq_handler = NULL;
		acpi_irq_irq = 0;
	}

	return AE_OK;
}
/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(acpi_integer ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}

EXPORT_SYMBOL(acpi_os_sleep);

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

EXPORT_SYMBOL(acpi_os_stall);

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	static u64 t;

#ifdef CONFIG_HPET
	/* TBD: use HPET if available */
#endif

#ifdef CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

	return ++t;
}
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
	u32 dummy;
	void __iomem *virt_addr;

	virt_addr = ioremap(phys_addr, width);
	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	default:
		BUG();
	}

	iounmap(virt_addr);

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
	void __iomem *virt_addr;

	virt_addr = ioremap(phys_addr, width);

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	default:
		BUG();
	}

	iounmap(virt_addr);

	return AE_OK;
}
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       void *value, u32 width)
{
	int result, size;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	BUG_ON(!raw_pci_ops);

	result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
				   PCI_DEVFN(pci_id->device, pci_id->function),
				   reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

EXPORT_SYMBOL(acpi_os_read_pci_configuration);

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				acpi_integer value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	BUG_ON(!raw_pci_ops);

	result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
				    PCI_DEVFN(pci_id->device, pci_id->function),
				    reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
/* TODO: Change code to take advantage of driver model more */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound */
				    acpi_handle chandle,	/* current node */
				    struct acpi_pci_id **id,
				    int *is_bridge, u8 * bus_number)
{
	acpi_handle handle;
	struct acpi_pci_id *pci_id = *id;
	acpi_status status;
	unsigned long temp;
	acpi_object_type type;
	u8 tu8;

	acpi_get_parent(chandle, &handle);
	if (handle != rhandle) {
		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
					bus_number);

		status = acpi_get_type(handle, &type);
		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
			return;

		status =
		    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
					  &temp);
		if (ACPI_SUCCESS(status)) {
			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

			if (*is_bridge)
				pci_id->bus = *bus_number;

			/* any nicer way to get bus number of bridge ? */
			status =
			    acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
							   8);
			if (ACPI_SUCCESS(status)
			    && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x18,
								   &tu8, 8);
				if (!ACPI_SUCCESS(status)) {
					/* Certainly broken...  FIX ME */
					return;
				}
				*is_bridge = 1;
				pci_id->bus = tu8;
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x19,
								   &tu8, 8);
				if (ACPI_SUCCESS(status)) {
					*bus_number = tu8;
				}
			} else
				*is_bridge = 0;
		}
	}
}

void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound */
			   acpi_handle chandle,	/* current node */
			   struct acpi_pci_id **id)
{
	int is_bridge = 1;
	u8 bus_number = (*id)->bus;

	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
static void acpi_os_execute_deferred(void *context)
{
	struct acpi_os_dpc *dpc = NULL;

	dpc = (struct acpi_os_dpc *)context;
	if (!dpc) {
		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
		return;
	}

	dpc->function(dpc->context);

	kfree(dpc);

	return;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct work_struct *task;

	ACPI_FUNCTION_TRACE("os_queue_for_execution");

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (!function)
		return_ACPI_STATUS(AE_BAD_PARAMETER);

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the tq_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static tq_struct.
	 * We can save time and code by allocating the DPC and tq_structs
	 * from the same memory.
	 */

	dpc =
	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
		    GFP_ATOMIC);
	if (!dpc)
		return_ACPI_STATUS(AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;

	task = (void *)(dpc + 1);
	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);

	if (!queue_work(kacpid_wq, task)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Call to queue_work() failed.\n"));
		kfree(dpc);
		status = AE_ERROR;
	}

	return_ACPI_STATUS(status);
}

EXPORT_SYMBOL(acpi_os_execute);
void acpi_os_wait_events_complete(void *context)
{
	flush_workqueue(kacpid_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
	spin_lock_init(*handle);

	return AE_OK;
}

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	return;
}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;
	memset(sem, 0, sizeof(struct semaphore));

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_create_semaphore);

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	kfree(sem);
	sem = NULL;

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_delete_semaphore);
/*
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
 * improvise.  The process is to sleep for one scheduler quantum
 * until the semaphore becomes available.  Downside is that this
 * may result in starvation for timeout-based waits when there's
 * lots of semaphore activity.
 *
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	int ret = 0;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	/*
	 * This can be called during resume with interrupts off.
	 * Like boot-time, we should be single threaded and will
	 * always get the lock if we try -- timeout or not.
	 * If this doesn't succeed, then we will oops courtesy of
	 * might_sleep() in down().
	 */
	if (!down_trylock(sem))
		return AE_OK;

	switch (timeout) {
		/*
		 * No Wait:
		 * --------
		 * A zero timeout value indicates that we shouldn't wait - just
		 * acquire the semaphore if available otherwise return AE_TIME
		 * (a.k.a. 'would block').
		 */
	case 0:
		if (down_trylock(sem))
			status = AE_TIME;
		break;

		/*
		 * Wait Indefinitely:
		 * ------------------
		 */
	case ACPI_WAIT_FOREVER:
		down(sem);
		break;

		/*
		 * Wait w/ Timeout:
		 * ----------------
		 */
	default:
		// TODO: A better timeout algorithm?
		{
			int i = 0;
			static const int quantum_ms = 1000 / HZ;

			ret = down_trylock(sem);
			for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
				schedule_timeout_interruptible(1);
				ret = down_trylock(sem);
			}

			if (ret != 0)
				status = AE_TIME;
		}
		break;
	}

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

EXPORT_SYMBOL(acpi_os_wait_semaphore);
/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_signal_semaphore);

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif				/* ACPI_FUTURE_USAGE */
/* Assumes no unreadable holes in between */
u8 acpi_os_readable(void *ptr, acpi_size len)
{
#if defined(__i386__) || defined(__x86_64__)
	char tmp;
	return !__get_user(tmp, (char __user *)ptr)
	    && !__get_user(tmp, (char __user *)ptr + len - 1);
#endif
	return 1;
}

#ifdef ACPI_FUTURE_USAGE
u8 acpi_os_writable(void *ptr, acpi_size len)
{
	/* could do dummy write (racy) or a kernel page table lookup.
	   The latter may be difficult at early boot when kmap doesn't work yet. */
	return 1;
}
#endif
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_signal);

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && str && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
/*
 * _OSI control
 * empty string disables _OSI
 * TBD additional string adds to _OSI
 */
static int __init acpi_osi_setup(char *str)
{
	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
	} else {
		/* TBD */
		printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n",
		       str);
	}

	return 1;
}

__setup("acpi_osi=", acpi_osi_setup);

/* enable serialization to combat AE_ALREADY_EXISTS errors */
static int __init acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);

/*
 * Wake and Run-Time GPEs are expected to be separate.
 * We disable wake-GPEs at run-time to prevent spurious
 * interrupts.
 *
 * However, if a system exists that shares Wake and
 * Run-time events on the same GPE this flag is available
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
 */
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

	acpi_gbl_leave_wake_gpes_disabled = FALSE;

	return 1;
}

__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);

static int __init acpi_hotkey_setup(char *str)
{
	acpi_specific_hotkey_enabled = FALSE;
	return 1;
}

__setup("acpi_generic_hotkey", acpi_hotkey_setup);

/*
 * max_cstate is defined in the base kernel so modules can
 * change it w/o depending on the state of the processor module.
 */
unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;

EXPORT_SYMBOL(max_cstate);
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
	if (*cache == NULL)	/* test the cache we got back, not the out-pointer */
		return AE_ERROR;
	else
		return AE_OK;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	(void)kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	(void)kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_interface
 *
 * PARAMETERS:  interface           - Requested interface to be validated
 *
 * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
 *
 * DESCRIPTION: Match an interface string to the interfaces supported by the
 *              host. Strings originate from an AML call to the _OSI method.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_interface (char *interface)
{
	return AE_SUPPORT;
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_address
 *
 * PARAMETERS:  space_id             - ACPI space ID
 *              address              - Physical address
 *              length               - Address length
 *
 * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
 *              should return AE_AML_ILLEGAL_ADDRESS.
 *
 * DESCRIPTION: Validate a system address via the host OS. Used to validate
 *              the addresses accessed by AML operation regions.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_address (
	u8                      space_id,
	acpi_physical_address   address,
	acpi_size               length)
{
	return AE_OK;
}

#endif