ia64/xen-unstable

view xen/arch/ia64/xen/vhpt.c @ 9748:2f86b84d0483

[IA64] more cleanup in vhpt.h

VHPT_CCHAIN_LOOKUP removed; its body is now inlined in ivt.S.
vhpt_insert() is now written in C.
Cleanup within vhpt.c/.h.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Apr 21 09:06:38 2006 -0600 (2006-04-21)
parents b09e8f46c9f6
children 77ccce98ddef
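
For orientation, a minimal sketch of the long-format VHPT entry this file manipulates. The field names match the accesses below (page_flags, itir, ti_tag, CChain), but the authoritative definition, including the exact field order, lives in vhpt.h, so treat this as an assumed layout rather than a copy of that header:

struct vhpt_lf_entry {
	unsigned long page_flags;	/* pte-format flags plus ppn; _PAGE_P marks the entry present */
	unsigned long itir;		/* itir word; page size in the ps field (bits 2..7) */
	unsigned long ti_tag;		/* translation tag; the INVALID_TI_TAG bit marks a free slot */
	unsigned long CChain;		/* collision-chain word; cleared in vhpt_flush, counted by dump_vhpt_stats */
};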
line source
/*
 * Initialize VHPT support.
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/vhpt.h>

extern long running_on_sim;

/* Per-CPU physical addresses of the first and last byte of this CPU's VHPT. */
DEFINE_PER_CPU (unsigned long, vhpt_paddr);
DEFINE_PER_CPU (unsigned long, vhpt_pend);

void vhpt_flush(void)
{
	struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr));
	int i;

#if 0
	static int firsttime = 2;

	if (firsttime) firsttime--;
	else {
		printf("vhpt_flush: *********************************************\n");
		printf("vhpt_flush: *********************************************\n");
		printf("vhpt_flush: *********************************************\n");
		printf("vhpt_flush: flushing vhpt (seems to crash at rid wrap?)...\n");
		printf("vhpt_flush: *********************************************\n");
		printf("vhpt_flush: *********************************************\n");
		printf("vhpt_flush: *********************************************\n");
	}
#endif
	/* Invalidate every entry in this CPU's VHPT. */
	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
		v->itir = 0;
		v->CChain = 0;
		v->page_flags = 0;
		v->ti_tag = INVALID_TI_TAG;
	}
	// initialize cache too???
}

#ifdef VHPT_GLOBAL
void vhpt_flush_address(unsigned long vadr, unsigned long addr_range)
{
	struct vhpt_lf_entry *vlfe;

	if ((vadr >> 61) == 7) {
		// no vhpt for region 7 yet, see vcpu_itc_no_srlz
		printf("vhpt_flush_address: region 7, spinning...\n");
		while(1);
	}
#if 0
	// this only seems to occur at shutdown, but it does occur
	if ((!addr_range) || addr_range & (addr_range - 1)) {
		printf("vhpt_flush_address: weird range, spinning...\n");
		while(1);
	}
	//printf("************** vhpt_flush_address(%p,%p)\n",vadr,addr_range);
#endif
	while ((long)addr_range > 0) {
		vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
		// FIXME: for now, just blow it away even if it belongs to
		// another domain.  Later, use ttag to check for match
		//if (!(vlfe->ti_tag & INVALID_TI_TAG)) {
		//printf("vhpt_flush_address: blowing away valid tag for vadr=%p\n",vadr);
		//}
		vlfe->ti_tag |= INVALID_TI_TAG;
		addr_range -= PAGE_SIZE;
		vadr += PAGE_SIZE;
	}
}

void vhpt_flush_address_remote(int cpu,
                               unsigned long vadr, unsigned long addr_range)
{
	while ((long)addr_range > 0) {
		/* Get the VHPT entry.  The offset is computed with the local
		   thash and applied to the remote CPU's table, which is mapped
		   at the same VHPT_ADDR with the same PTA settings.  */
		unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
		volatile struct vhpt_lf_entry *v;
		v = __va(per_cpu(vhpt_paddr, cpu) + off);
		v->ti_tag = INVALID_TI_TAG;
		addr_range -= PAGE_SIZE;
		vadr += PAGE_SIZE;
	}
}
#endif

static void vhpt_map(unsigned long pte)
{
	unsigned long psr;

	/* Pin the VHPT mapping with a data translation register
	   (target mask 0x2 = dtr) while PSR.ic is cleared.  */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
	ia64_set_psr(psr);
	ia64_srlz_i();
}

void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
{
	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
	unsigned long tag = ia64_ttag (vadr);

	/* No need to first disable the entry, since VHPT is per LP
	   and VHPT is TR mapped.  */
	vlfe->itir = logps;
	vlfe->page_flags = pte | _PAGE_P;
	vlfe->ti_tag = tag;
}

void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
{
	unsigned long mask = (1L << logps) - 1;
	int i;

	if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
		// if this happens, we may want to revisit this algorithm
		panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
	}
	if (logps-PAGE_SHIFT > 2) {
		// FIXME: Should add counter here to see how often this
		// happens (e.g. for 16MB pages!) and determine if it
		// is a performance problem.  On a quick look, it takes
		// about 39000 instrs for a 16MB page and it seems to occur
		// only a few times/second, so OK for now.
		// An alternate solution would be to just insert the one
		// 16KB in the vhpt (but with the full mapping)?
		//printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
		//"va=%p, pa=%p, pa-masked=%p\n",
		//logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
		//(pte&_PFN_MASK)&~mask);
	}
	vaddr &= ~mask;
	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
		vhpt_insert(vaddr, pte, logps << 2);
		vaddr += PAGE_SIZE;
	}
}
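
/*
 * Illustrative example, assuming PAGE_SHIFT == 14 (the 16KB base pages the
 * comments above suggest): a 16MB mapping has logps == 24, so the loop
 * installs 1 << (24 - 14) == 1024 separate VHPT entries, one per 16KB page of
 * the region, each tagged with the full large-page itir (logps << 2).  That
 * volume is what the ~39000-instruction estimate in the FIXME refers to.
 */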

void vhpt_init(void)
{
	unsigned long vhpt_total_size, vhpt_alignment;
	unsigned long paddr, pte;
	struct page_info *page;
#if !VHPT_ENABLED
	return;
#endif
	// allocate a huge chunk of physical memory.... how???
	vhpt_total_size = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
	vhpt_alignment = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
	printf("vhpt_init: vhpt size=0x%lx, align=0x%lx\n",
	       vhpt_total_size, vhpt_alignment);
	/* This allocation only holds true if vhpt table is unique for
	 * all domains. Or else later new vhpt table should be allocated
	 * from domain heap when each domain is created. Assume xen buddy
	 * allocator can provide natural aligned page by order?
	 */
	page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
	if (!page)
		panic("vhpt_init: can't allocate VHPT!\n");
	paddr = page_to_maddr(page);
	__get_cpu_var(vhpt_paddr) = paddr;
	__get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
	printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
	       paddr, __get_cpu_var(vhpt_pend));
	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
	vhpt_map(pte);
	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
		     VHPT_ENABLED);
	vhpt_flush();
}
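
/*
 * One reading of the ia64_set_pta() value above, based on the architected PTA
 * layout (an assumption, not anything defined in this file): VHPT_ADDR
 * supplies the table base, bit 8 selects the long (VHPT) walker format,
 * bits 2..7 carry the table size (VHPT_SIZE_LOG2), and VHPT_ENABLED sets the
 * enable bit.
 */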

void zero_vhpt_stats(void)
{
	return;
}

int dump_vhpt_stats(char *buf)
{
	int i;
	char *s = buf;
	struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
	unsigned long vhpt_valid = 0, vhpt_chains = 0;

	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
		if (!(v->ti_tag & INVALID_TI_TAG)) vhpt_valid++;
		if (v->CChain) vhpt_chains++;
	}
	s += sprintf(s, "VHPT usage: %ld/%ld (%ld collision chains)\n",
		     vhpt_valid, (unsigned long) VHPT_NUM_ENTRIES, vhpt_chains);
	return s - buf;
}