ia64/linux-2.6.18-xen.hg

view arch/cris/mm/tlb.c @ 647:a5bb490065f6

Fix the build after public header sync.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 13 14:01:49 2008 +0100 (2008-08-13)
parents 831230e53067
children
line source
1 /*
2 * linux/arch/cris/mm/tlb.c
3 *
4 * Copyright (C) 2000, 2001 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen (bjornw@axis.com)
7 *
8 */
10 #include <linux/init.h>
11 #include <asm/tlb.h>
13 #define D(x)
15 /* The TLB can host up to 64 different mm contexts at the same time.
16 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
17 * page_id that has to match to give a hit. In page_id_map, we keep track
18 * of which mm's we have assigned which page_id's, so that we know when
19 * to invalidate TLB entries.
20 *
21 * The last page_id is never running - it is used as an invalid page_id
22 * so we can make TLB entries that will never match.
23 *
24 * Notice that we need to make the flushes atomic, otherwise an interrupt
25 * handler that uses vmalloced memory might cause a TLB load in the middle
26 * of a flush, causing inconsistent state.
27 */
/* page_id -> owning mm. A NULL slot is free; slot INVALID_PAGEID is never
 * assigned to a real mm (see comment above).
 */
struct mm_struct *page_id_map[NUM_PAGEID];
static int map_replace_ptr = 1; /* which page_id_map entry to replace next */
32 /* the following functions are similar to those used in the PPC port */
34 static inline void
35 alloc_context(struct mm_struct *mm)
36 {
37 struct mm_struct *old_mm;
39 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm));
41 /* did we replace an mm ? */
43 old_mm = page_id_map[map_replace_ptr];
45 if(old_mm) {
46 /* throw out any TLB entries belonging to the mm we replace
47 * in the map
48 */
49 flush_tlb_mm(old_mm);
51 old_mm->context.page_id = NO_CONTEXT;
52 }
54 /* insert it into the page_id_map */
56 mm->context.page_id = map_replace_ptr;
57 page_id_map[map_replace_ptr] = mm;
59 map_replace_ptr++;
61 if(map_replace_ptr == INVALID_PAGEID)
62 map_replace_ptr = 0; /* wrap around */
63 }
65 /*
66 * if needed, get a new MMU context for the mm. otherwise nothing is done.
67 */
69 void
70 get_mmu_context(struct mm_struct *mm)
71 {
72 if(mm->context.page_id == NO_CONTEXT)
73 alloc_context(mm);
74 }
76 /* called by __exit_mm to destroy the used MMU context if any before
77 * destroying the mm itself. this is only called when the last user of the mm
78 * drops it.
79 *
80 * the only thing we really need to do here is mark the used PID slot
81 * as empty.
82 */
84 void
85 destroy_context(struct mm_struct *mm)
86 {
87 if(mm->context.page_id != NO_CONTEXT) {
88 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm));
89 flush_tlb_mm(mm); /* TODO this might be redundant ? */
90 page_id_map[mm->context.page_id] = NULL;
91 }
92 }
94 /* called once during VM initialization, from init.c */
96 void __init
97 tlb_init(void)
98 {
99 int i;
101 /* clear the page_id map */
103 for (i = 1; i < sizeof (page_id_map) / sizeof (page_id_map[0]); i++)
104 page_id_map[i] = NULL;
106 /* invalidate the entire TLB */
108 flush_tlb_all();
110 /* the init_mm has context 0 from the boot */
112 page_id_map[0] = &init_mm;
113 }