ia64/xen-unstable

view linux-2.6-xen-sparse/arch/um/kernel/physmem.c @ 12272:8f552314e45a

[LINUX] Use pfn_to_page instead of relying on memory assumptions

This makes the code agnostic with respect to the memory model being used.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
author kfraser@localhost.localdomain
date Tue Nov 07 09:28:18 2006 +0000 (2006-11-07)
parents 47a2eb32002a
children 4fad820a2233
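
The change summarised above replaces open-coded mem_map arithmetic with the pfn_to_page() helper, which is defined for every memory model. A rough sketch of the idiom follows; the function name phys_to_page_example is made up for illustration and is not taken from this changeset:

/* Illustrative only: resolve a physical address to its struct page.
 * The open-coded form shown in the comment is valid only with a single
 * contiguous mem_map (FLATMEM); pfn_to_page() also works with
 * DISCONTIGMEM and SPARSEMEM. */
static struct page *phys_to_page_example(unsigned long phys)
{
        /* return mem_map + (phys >> PAGE_SHIFT);    <- FLATMEM-only form */
        return pfn_to_page(phys >> PAGE_SHIFT);      /* model-agnostic form */
}
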
line source

/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/rbtree.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/bootmem.h"
#include "linux/module.h"
#include "asm/types.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "user_util.h"
#include "mode_kern.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "kern.h"
#include "init.h"

struct phys_desc {
        struct rb_node rb;
        int fd;
        __u64 offset;
        void *virt;
        unsigned long phys;
        struct list_head list;
};

static struct rb_root phys_mappings = RB_ROOT;
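
/* Walk the rb-tree of substituted pages and return the link slot for the
 * given virtual address; an empty slot (*slot == NULL) means no
 * substitution is recorded for that page. The node that would parent a
 * new entry is reported through parent_out when it is non-NULL. */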
static struct rb_node **find_rb(void *virt, struct rb_node **parent_out)
{
        struct rb_node **n = &phys_mappings.rb_node;
        struct rb_node *parent = NULL;
        struct phys_desc *d;

        while(*n != NULL){
                d = rb_entry(*n, struct phys_desc, rb);
                if(d->virt == virt)
                        break;

                parent = *n;
                if(d->virt > virt)
                        n = &(*n)->rb_left;
                else
                        n = &(*n)->rb_right;
        }

        if(parent_out != NULL)
                *parent_out = parent;

        return(n);
}

static struct phys_desc *find_phys_mapping(void *virt)
{
        struct rb_node **n = find_rb(virt, NULL);

        if(*n == NULL)
                return(NULL);

        return(rb_entry(*n, struct phys_desc, rb));
}

static void insert_phys_mapping(struct phys_desc *desc)
{
        struct rb_node *parent;
        struct rb_node **n = find_rb(desc->virt, &parent);

        if(*n != NULL)
                panic("Physical remapping for %p already present",
                      desc->virt);

        /* *n is NULL here, so link with the parent recorded by find_rb()
         * rather than dereferencing the empty slot. */
        rb_link_node(&desc->rb, parent, n);
        rb_insert_color(&desc->rb, &phys_mappings);
}

LIST_HEAD(descriptor_mappings);

struct desc_mapping {
        int fd;
        struct list_head list;
        struct list_head pages;
};

static struct desc_mapping *find_mapping(int fd)
{
        struct desc_mapping *desc;
        struct list_head *ele;

        list_for_each(ele, &descriptor_mappings){
                desc = list_entry(ele, struct desc_mapping, list);
                if(desc->fd == fd)
                        return(desc);
        }

        return(NULL);
}

static struct desc_mapping *descriptor_mapping(int fd)
{
        struct desc_mapping *desc;

        desc = find_mapping(fd);
        if(desc != NULL)
                return(desc);

        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                return(NULL);

        *desc = ((struct desc_mapping)
                 { .fd    = fd,
                   .list  = LIST_HEAD_INIT(desc->list),
                   .pages = LIST_HEAD_INIT(desc->pages) });
        list_add(&desc->list, &descriptor_mappings);

        return(desc);
}
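
/* Substitute the page containing 'virt': map one page of 'fd' at 'offset'
 * over it and record the substitution so it can later be undone or written
 * back when the descriptor is forgotten. */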
int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
{
        struct desc_mapping *fd_maps;
        struct phys_desc *desc;
        unsigned long phys;
        int err;

        fd_maps = descriptor_mapping(fd);
        if(fd_maps == NULL)
                return(-ENOMEM);

        phys = __pa(virt);
        desc = find_phys_mapping(virt);
        if(desc != NULL)
                panic("Address 0x%p is already substituted\n", virt);

        err = -ENOMEM;
        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                goto out;

        *desc = ((struct phys_desc)
                 { .fd     = fd,
                   .offset = offset,
                   .virt   = virt,
                   .phys   = phys,
                   .list   = LIST_HEAD_INIT(desc->list) });
        insert_phys_mapping(desc);

        list_add(&desc->list, &fd_maps->pages);

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
        if(!err)
                goto out;

        /* Mapping failed - take the descriptor back off both the rb-tree
         * and the per-fd page list before freeing it. */
        rb_erase(&desc->rb, &phys_mappings);
        list_del(&desc->list);
        kfree(desc);
 out:
        return(err);
}
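
/* Host file descriptor backing UML's "physical" memory; created by
 * create_mem_file() in setup_physmem() during early boot. */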
static int physmem_fd = -1;

static void remove_mapping(struct phys_desc *desc)
{
        void *virt = desc->virt;
        int err;

        rb_erase(&desc->rb, &phys_mappings);
        list_del(&desc->list);
        kfree(desc);

        err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
        if(err)
                panic("Failed to unmap block device page from physical memory, "
                      "errno = %d", -err);
}

int physmem_remove_mapping(void *virt)
{
        struct phys_desc *desc;

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        desc = find_phys_mapping(virt);
        if(desc == NULL)
                return(0);

        remove_mapping(desc);
        return(1);
}
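
/* Undo every substitution made on behalf of 'fd': restore the physmem
 * backing for each page and re-read the page contents from the descriptor
 * before the caller drops it. */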
void physmem_forget_descriptor(int fd)
{
        struct desc_mapping *desc;
        struct phys_desc *page;
        struct list_head *ele, *next;
        __u64 offset;
        void *addr;
        int err;

        desc = find_mapping(fd);
        if(desc == NULL)
                return;

        list_for_each_safe(ele, next, &desc->pages){
                page = list_entry(ele, struct phys_desc, list);
                offset = page->offset;
                addr = page->virt;
                remove_mapping(page);
                err = os_seek_file(fd, offset);
                if(err)
                        panic("physmem_forget_descriptor - failed to seek "
                              "to %lld in fd %d, error = %d\n",
                              offset, fd, -err);
                err = os_read_file(fd, addr, PAGE_SIZE);
                if(err < 0)
                        panic("physmem_forget_descriptor - failed to read "
                              "from fd %d to 0x%p, error = %d\n",
                              fd, addr, -err);
        }

        list_del(&desc->list);
        kfree(desc);
}

EXPORT_SYMBOL(physmem_forget_descriptor);
EXPORT_SYMBOL(physmem_remove_mapping);
EXPORT_SYMBOL(physmem_subst_mapping);
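
/* Called when pages are freed so that any substituted mappings covering
 * them are removed and the pages revert to their physmem backing. */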
int arch_free_page(struct page *page, int order)
{
        void *virt;
        int i;

        for(i = 0; i < (1 << order); i++){
                virt = __va(page_to_phys(page + i));
                physmem_remove_mapping(virt);
        }

        return 0;
}

int is_remapped(void *virt)
{
        struct phys_desc *desc = find_phys_mapping(virt);

        return(desc != NULL);
}

/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long long physmem_size;
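
/* Allocate and initialise the struct page array covering physmem, iomem
 * and highmem pages: from kmalloc/vmalloc once the slab allocator is up,
 * otherwise from the bootmem allocator. */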
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
        struct page *p, *map;
        unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
        unsigned long iomem_len, iomem_pages, total_len, total_pages;
        int i;

        phys_pages = physmem >> PAGE_SHIFT;
        phys_len = phys_pages * sizeof(struct page);

        iomem_pages = iomem >> PAGE_SHIFT;
        iomem_len = iomem_pages * sizeof(struct page);

        highmem_pages = highmem >> PAGE_SHIFT;
        highmem_len = highmem_pages * sizeof(struct page);

        total_pages = phys_pages + iomem_pages + highmem_pages;
        total_len = phys_len + iomem_len + highmem_len;

        if(kmalloc_ok){
                map = kmalloc(total_len, GFP_KERNEL);
                if(map == NULL)
                        map = vmalloc(total_len);
        }
        else map = alloc_bootmem_low_pages(total_len);

        if(map == NULL)
                return(-ENOMEM);

        for(i = 0; i < total_pages; i++){
                p = &map[i];
                set_page_count(p, 0);
                SetPageReserved(p);
                INIT_LIST_HEAD(&p->lru);
        }

        max_mapnr = total_pages;
        return(0);
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
        if(kmem_top == 0)
                kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
        return(kmem_top);
}
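
/* Map 'len' bytes of physical memory at virtual address 'virt' with the
 * requested r/w/x permissions, using whatever file and offset currently
 * back that physical range. */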
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
                int r, int w, int x)
{
        __u64 offset;
        int fd, err;

        fd = phys_mapping(phys, &offset);
        err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
        if(err) {
                if(err == -ENOMEM)
                        printk("try increasing the host's "
                               "/proc/sys/vm/max_map_count to <physical "
                               "memory size>/4096\n");
                panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
                      "err = %d\n", virt, fd, offset, len, r, w, x, err);
        }
}

#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

extern int __syscall_stub_start, __binary_start;
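
/* Create the host file backing physical memory, map the non-reserved part
 * of it into the UML address space and hand the rest of the range to the
 * bootmem allocator. */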
void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len, unsigned long long highmem)
{
        unsigned long reserve = reserve_end - start;
        int pfn = PFN_UP(__pa(reserve_end));
        int delta = (len - reserve) >> PAGE_SHIFT;
        int err, offset, bootmap_size;

        physmem_fd = create_mem_file(len + highmem);

        offset = uml_reserved - uml_physmem;
        err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
                            len - offset, 1, 1, 0);
        if(err < 0){
                os_print_error(err, "Mapping memory");
                exit(1);
        }

        /* Special kludge - This page will be mapped in to userspace processes
         * from physmem_fd, so it needs to be written out there.
         */
        os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
        os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);

        bootmap_size = init_bootmem(pfn, pfn + delta);
        free_bootmem(__pa(reserve_end) + bootmap_size,
                     len - bootmap_size - reserve);
}
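
/* Translate a physical address into the file descriptor and offset that
 * back it: a substituted page, the physmem file, an iomem region, or
 * highmem. Returns -1 if the address is not backed by anything. */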
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
        struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
        int fd = -1;

        if(desc != NULL){
                fd = desc->fd;
                *offset_out = desc->offset;
        }
        else if(phys < physmem_size){
                fd = physmem_fd;
                *offset_out = phys;
        }
        else if(phys < __pa(end_iomem)){
                struct iomem_region *region = iomem_regions;

                while(region != NULL){
                        if((phys >= region->phys) &&
                           (phys < region->phys + region->size)){
                                fd = region->fd;
                                *offset_out = phys - region->phys;
                                break;
                        }
                        region = region->next;
                }
        }
        else if(phys < __pa(end_iomem) + highmem){
                fd = physmem_fd;
                *offset_out = phys - iomem_size;
        }

        return(fd);
}

static int __init uml_mem_setup(char *line, int *add)
{
        char *retptr;
        physmem_size = memparse(line, &retptr);
        return 0;
}

__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host. It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"        Example: mem=64M\n\n"
);

unsigned long find_iomem(char *driver, unsigned long *len_out)
{
        struct iomem_region *region = iomem_regions;

        while(region != NULL){
                if(!strcmp(region->driver, driver)){
                        *len_out = region->size;
                        return(region->virt);
                }
                region = region->next;
        }

        return(0);
}
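
/* Map each registered iomem region just above high_physmem and record the
 * virtual and physical address where it landed. */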
int setup_iomem(void)
{
        struct iomem_region *region = iomem_regions;
        unsigned long iomem_start = high_physmem + PAGE_SIZE;
        int err;

        while(region != NULL){
                err = os_map_memory((void *) iomem_start, region->fd, 0,
                                    region->size, 1, 1, 0);
                if(err)
                        printk("Mapping iomem region for driver '%s' failed, "
                               "errno = %d\n", region->driver, -err);
                else {
                        region->virt = iomem_start;
                        region->phys = __pa(region->virt);
                }

                iomem_start += region->size + PAGE_SIZE;
                region = region->next;
        }

        return(0);
}

__initcall(setup_iomem);

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */