ia64/linux-2.6.18-xen.hg

kernel/resource.c @ changeset 912:dd42cdb0ab89

[IA64] Build blktap2 driver by default in ia64 builds.

add CONFIG_XEN_BLKDEV_TAP2=y to buildconfigs/linux-defconfig_xen_ia64.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author		Isaku Yamahata <yamahata@valinux.co.jp>
date		Mon Jun 29 12:09:16 2009 +0900
parents		20813f115a81
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

#ifdef CONFIG_PROC_IOMEM_MACHINE
struct resource iomem_machine_resource = {
	.name	= "Machine PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_machine_resource);
#endif

static DEFINE_RWLOCK(resource_lock);

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

#ifdef CONFIG_PROC_IOMEM_MACHINE
static int iomem_machine_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_machine_resource;
	}
	return res;
}
#endif

static struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_PROC_IOMEM_MACHINE
static struct file_operations proc_iomem_machine_operations = {
	.open		= iomem_machine_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif

static int __init ioresources_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("ioports", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_ioports_operations;
	entry = create_proc_entry("iomem", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_iomem_operations;
#ifdef CONFIG_PROC_IOMEM_MACHINE
	if (is_initial_xendomain()) {
		entry = create_proc_entry("iomem_machine", 0, NULL);
		if (entry)
			entry->proc_fops = &proc_iomem_machine_operations;
	}
#endif
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */
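
/*
 * The seq_file handlers above are what generate /proc/ioports and
 * /proc/iomem.  Purely illustrative sample output (addresses and names
 * vary by machine):
 *
 *	00000000-0009ffff : System RAM
 *	000a0000-000bffff : Video RAM area
 *	e0000000-efffffff : PCI Bus #01
 *	  e0000000-e7ffffff : 0000:01:00.0
 *
 * r_show() indents each entry by two spaces per level of the resource
 * tree (capped at MAX_IORES_LEVEL) and prints 4 hex digits per address
 * when the root's end is below 0x10000 (the I/O port case), 8 otherwise.
 */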

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
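
/*
 * Usage sketch (illustrative only; the device name and MMIO range below
 * are hypothetical, not taken from this tree): a driver that owns a fixed,
 * non-overlapping window can publish it directly under iomem_resource:
 *
 *	static struct resource foo_regs = {
 *		.name	= "foo registers",
 *		.start	= 0xfed40000,
 *		.end	= 0xfed40fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_regs))
 *		return -EBUSY;		(the range is already claimed)
 *	...
 *	release_resource(&foo_regs);	(on teardown)
 */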

struct resource *____request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

EXPORT_SYMBOL(____request_resource);

int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end and res->flags.
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
int find_next_system_ram(struct resource *res)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}
#endif
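
/*
 * Sketch of the intended calling pattern, modelled on the memory hotplug
 * code (variable names here are hypothetical): walk every "System RAM"
 * chunk intersecting [start, end):
 *
 *	struct resource res;
 *
 *	res.start = start;
 *	res.end = end - 1;
 *	res.flags = IORESOURCE_MEM;
 *	while (find_next_system_ram(&res) >= 0) {
 *		... res.start/res.end now bound one System RAM chunk ...
 *		res.start = res.end + 1;
 *		res.end = end - 1;
 *	}
 *
 * Note that res.flags must match the flags the RAM entries were registered
 * with, per the flags comparison in the loop above.
 */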

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 void (*alignf)(void *, struct resource *,
					resource_size_t, resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start >= size - 1) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Allocate empty slot in the resource tree given range and alignment.
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      void (*alignf)(void *, struct resource *,
				     resource_size_t, resource_size_t),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
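
/*
 * Usage sketch (illustrative; the parent, range and size are hypothetical):
 * let the kernel pick any free, 4KB-aligned 4KB window inside a given
 * range.  On success the chosen window is written back to new->start and
 * new->end and the node is linked into the tree:
 *
 *	static struct resource foo_win = {
 *		.name	= "foo window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *			      0xa0000000, 0xafffffff, 0x1000,
 *			      NULL, NULL))
 *		return -EBUSY;	(no free slot of that size in the range)
 */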

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource() when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource. Otherwise the new resource becomes the child of
 * the conflicting resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	int result;
	struct resource *first, *next;

	write_lock(&resource_lock);
begin:
	result = 0;
	first = __request_resource(parent, new);
	if (!first)
		goto out;

	result = -EBUSY;
	if (first == parent)
		goto out;

	/* Resource fully contained by the clashing resource? Recurse into it */
	if (first->start <= new->start && first->end >= new->end) {
		parent = first;
		goto begin;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			goto out;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	result = 0;

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}

out:
	write_unlock(&resource_lock);
	return result;
}
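
/*
 * Usage sketch (illustrative; the window below is hypothetical): unlike
 * request_resource(), insert_resource() succeeds even if already-registered
 * resources fall entirely inside the new range; they simply become children
 * of the new node.  Only a partial overlap makes it fail:
 *
 *	static struct resource bridge_win = {
 *		.name	= "host bridge window",
 *		.start	= 0xc0000000,
 *		.end	= 0xcfffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &bridge_win))
 *		... a conflicting resource straddles the new range ...
 */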

/*
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns -EBUSY if it can't fit.  Existing children of
 * the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

out:
	write_unlock(&resource_lock);
	return result;
}

EXPORT_SYMBOL(adjust_resource);
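
/*
 * Usage sketch (illustrative; "res" and "new_size" are hypothetical): grow
 * an already-registered region in place, keeping its start.  This only
 * succeeds if the grown region still fits inside its parent, keeps all of
 * its children inside, and does not run into the next sibling:
 *
 *	if (adjust_resource(res, res->start, new_size))
 *		... the new extent clashes with the parent, a sibling or a child ...
 */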

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * Request-region creates a new busy region.
 *
 * Check-region returns non-zero if the area is already busy.
 *
 * Release-region releases a matching busy region.
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (res) {
		res->name = name;
		res->start = start;
		res->end = start + n - 1;
		res->flags = IORESOURCE_BUSY;

		write_lock(&resource_lock);

		for (;;) {
			struct resource *conflict;

			conflict = __request_resource(parent, res);
			if (!conflict)
				break;
			if (conflict != parent) {
				parent = conflict;
				if (!(conflict->flags & IORESOURCE_BUSY))
					continue;
			}

			/* Uhhuh, that didn't work out.. */
			kfree(res);
			res = NULL;
			break;
		}
		write_unlock(&resource_lock);
	}
	return res;
}

EXPORT_SYMBOL(__request_region);

int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource * res;

	res = __request_region(parent, start, n, "check-region");
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}

EXPORT_SYMBOL(__check_region);

void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}

EXPORT_SYMBOL(__release_region);
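
/*
 * Drivers normally do not call __request_region()/__release_region()
 * directly; they use the request_region()/release_region() and
 * request_mem_region()/release_mem_region() wrappers from <linux/ioport.h>,
 * which supply &ioport_resource or &iomem_resource as the parent.
 * Illustrative sketch (port base, length and name are hypothetical):
 *
 *	if (!request_region(0x220, 16, "foo-card"))
 *		return -EBUSY;	(ports 0x220-0x22f are already busy)
 *	...
 *	release_region(0x220, 16);
 */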

/*
 * Called from init/main.c to reserve IO ports.
 */
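
/*
 * Boot parameter sketch (values are hypothetical): "reserve=0x320,32" marks
 * I/O ports 0x320-0x33f busy so that drivers will not claim them; a base at
 * or above 0x10000 is reserved in iomem_resource instead.  Up to MAXRESERVE
 * base,size pairs are accepted by the parser below.
 */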
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);