xen/include/asm-powerpc/page.h @ 12932:0379ac3367b2 (ia64/xen-unstable)

[XEN][POWERPC] Use gmfn_to_mfn() over pfn2mfn()

Should only use pfn2mfn() if you care about the type of memory.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

Author:   Jimi Xenidis <jimix@watson.ibm.com>
Date:     Mon Oct 02 21:40:26 2006 -0400
Parents:  4da585fb62f9
Children: 8515e163f1df
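A minimal sketch of the preference described in the commit message, assuming the usual Xen call forms gmfn_to_mfn(d, gmfn) and the PowerPC-specific pfn2mfn(d, pfn, &type); the exact signatures are not shown in this file, so treat the snippet as illustrative only:

    /* Preferred: the caller only needs the machine frame behind a guest frame. */
    mfn = gmfn_to_mfn(d, gmfn);

    /* Use pfn2mfn() only when the type of the backing memory matters,
       e.g. to treat I/O ranges differently from ordinary RAM (illustrative). */
    mfn = pfn2mfn(d, gmfn, &type);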
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#ifndef __ASSEMBLY__

#include <xen/config.h>
#include <asm/cache.h>
#include <asm/debugger.h>

#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
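/*
 * Worked example (PAGE_SHIFT == 12 as defined above, so PAGE_SIZE == 0x1000):
 *   PFN_DOWN(0x1001) == 1   -- round an address down to its frame number
 *   PFN_UP(0x1001)   == 2   -- round a byte count up to whole frames
 *   PFN_UP(0x1000)   == 1   -- an exact page multiple is not rounded further
 */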
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
#define linear_l1_table \
    ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))

#define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)

/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 */
#define mfn_valid(mfn) ((mfn) < max_page)

#define virt_to_maddr(va) ((unsigned long)(va))
#define maddr_to_virt(ma) ((void *)((unsigned long)(ma)))
/* Shorthand versions of the above functions. */
#define __pa(x) (virt_to_maddr(x))
#define __va(x) (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn) (maddr_to_virt(mfn << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn) (frame_table + (mfn))
#define page_to_mfn(pg) ((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma) (frame_table + ((ma) >> PAGE_SHIFT))
#define page_to_maddr(pg) ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)

/* Convert between Xen-heap virtual addresses and page-info structures. */
#define virt_to_page(va) (frame_table + (__pa(va) >> PAGE_SHIFT))
#define page_to_virt(pg) (maddr_to_virt(page_to_maddr(pg)))

/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
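/*
 * The conversions above compose as one would expect for a machine frame
 * number m and the flat frame_table[] layout:
 *   mfn_to_page(m)                == frame_table + m
 *   page_to_maddr(mfn_to_page(m)) == (paddr_t)m << PAGE_SHIFT
 *   paddr_to_pfn(pfn_to_paddr(m)) == m
 * virt_to_maddr()/maddr_to_virt() are identity casts here, so __pa() and
 * __va() simply reinterpret a Xen-heap pointer as a machine address and back.
 */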
static __inline__ void clear_page(void *addr)
{
    unsigned long lines, line_size;

    line_size = cpu_caches.dline_size;
    lines = cpu_caches.dlines_per_page;

    __asm__ __volatile__(
        "mtctr %1 # clear_page\n\
        1: dcbz 0,%0\n\
        add %0,%0,%3\n\
        bdnz+ 1b"
        : "=r" (addr)
        : "r" (lines), "0" (addr), "r" (line_size)
        : "ctr", "memory");
}
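/*
 * clear_page() above zeroes one page a cache line at a time: mtctr loads the
 * per-page line count into the CTR register, dcbz zeroes an entire data
 * cache block, and bdnz decrements CTR and loops.  A usage sketch, with
 * alloc_xenheap_page() shown only as a plausible caller:
 *
 *     void *p = alloc_xenheap_page();
 *     if ( p != NULL )
 *         clear_page(p);
 */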
extern void copy_page(void *dp, void *sp);

#define linear_pg_table linear_l1_table

static inline int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}
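/*
 * get_order() maps an allocation size in bytes to the smallest order
 * (log2 of the page count) that covers it.  With PAGE_SHIFT == 12:
 *   get_order(1)             == 0
 *   get_order(PAGE_SIZE)     == 0
 *   get_order(PAGE_SIZE + 1) == 1
 *   get_order(4 * PAGE_SIZE) == 2
 */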
/* XXX combine with get_order() above */
#define get_order_from_bytes get_order
static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
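/*
 * get_order_from_pages() does the same rounding for a page count rather
 * than a byte count, e.g. 1 page -> order 0, 2 pages -> order 1, and
 * 3 or 4 pages -> order 2.
 */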
#define __flush_tlb_one(__addr) \
    __asm__ __volatile__("tlbie %0": :"r" (__addr): "memory")

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_PWT      0x008UL
#define _PAGE_PCD      0x010UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL
#define _PAGE_PAT      0x080UL
#define _PAGE_PSE      0x080UL
#define _PAGE_GLOBAL   0x100UL
#endif /* ! __ASSEMBLY__ */
#endif