ia64/linux-2.6.18-xen.hg

view include/asm-sh64/tlb.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 /*
2 * include/asm-sh64/tlb.h
3 *
4 * Copyright (C) 2003 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 */
11 #ifndef __ASM_SH64_TLB_H
12 #define __ASM_SH64_TLB_H
14 /*
15 * Note! These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED
16 * for head.S! Once this limitation is gone, we can clean the rest of this up.
17 */
19 /* ITLB defines */
20 #define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
21 #define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
23 /* DTLB defines */
24 #define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
25 #define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
27 #ifndef __ASSEMBLY__
29 /**
30 * for_each_dtlb_entry
31 *
32 * @tlb: TLB entry
33 *
34 * Iterate over free (non-wired) DTLB entries
35 */
36 #define for_each_dtlb_entry(tlb) \
37 for (tlb = cpu_data->dtlb.first; \
38 tlb <= cpu_data->dtlb.last; \
39 tlb += cpu_data->dtlb.step)
41 /**
42 * for_each_itlb_entry
43 *
44 * @tlb: TLB entry
45 *
46 * Iterate over free (non-wired) ITLB entries
47 */
48 #define for_each_itlb_entry(tlb) \
49 for (tlb = cpu_data->itlb.first; \
50 tlb <= cpu_data->itlb.last; \
51 tlb += cpu_data->itlb.step)
53 /**
54 * __flush_tlb_slot
55 *
56 * @slot: Address of TLB slot.
57 *
58 * Flushes TLB slot @slot.
59 */
60 static inline void __flush_tlb_slot(unsigned long long slot)
61 {
62 __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
63 }
65 /* arch/sh64/mm/tlb.c */
66 extern int sh64_tlb_init(void);
67 extern unsigned long long sh64_next_free_dtlb_entry(void);
68 extern unsigned long long sh64_get_wired_dtlb_entry(void);
69 extern int sh64_put_wired_dtlb_entry(unsigned long long entry);
71 extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr);
72 extern void sh64_teardown_tlb_slot(unsigned long long config_addr);
74 #define tlb_start_vma(tlb, vma) \
75 flush_cache_range(vma, vma->vm_start, vma->vm_end)
77 #define tlb_end_vma(tlb, vma) \
78 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
80 #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
82 /*
83 * Flush whole TLBs for MM
84 */
85 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
87 #include <asm-generic/tlb.h>
89 #endif /* __ASSEMBLY__ */
91 #endif /* __ASM_SH64_TLB_H */