ia64/linux-2.6.18-xen.hg

view arch/m68k/sun3x/dvma.c @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain: they are set up in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author	Keir Fraser <keir.fraser@citrix.com>
date	Thu Feb 28 10:55:18 2008 +0000
parents 831230e53067
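
For context, a rough sketch of the arrangement the description refers to.
This is illustrative only and not code from this tree: the resource names
come from the description, while the hypercall, command, and range-identifier
names are assumptions based on the public Xen kexec interface. The point is
that the resources are populated once from values the hypervisor returns, so
reserve_memory() has nothing left to initialise.

/* Hypothetical helper showing the idea behind machine_kexec_setup_resources();
 * KEXEC_RANGE_MA_EFI_MEMMAP is an assumed range identifier. */
#include <linux/ioport.h>
#include <xen/interface/kexec.h>

static struct resource efi_memmap_res = { .name = "EFI Memory Map" };

static void setup_efi_memmap_resource(void)
{
        xen_kexec_range_t range = { .range = KEXEC_RANGE_MA_EFI_MEMMAP };

        /* Ask the hypervisor for the region; no defaults are needed in
         * reserve_memory(). */
        if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range) == 0) {
                efi_memmap_res.start = range.start;
                efi_memmap_res.end = range.start + range.size - 1;
        }
}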
line source
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4. Replaced with a significantly more primitive version
 * similar to the sun3 code. the old functionality was probably more
 * desirable, but.... -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK         0x03ffe000
#define IOMMU_CACHE_INHIBIT     0x00000040
#define IOMMU_FULL_BLOCK        0x00000020
#define IOMMU_MODIFIED          0x00000010
#define IOMMU_USED              0x00000008
#define IOMMU_WRITE_PROTECT     0x00000004
#define IOMMU_DT_MASK           0x00000003
#define IOMMU_DT_INVALID        0x00000000
#define IOMMU_DT_VALID          0x00000001
#define IOMMU_DT_BAD            0x00000002
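
/* Each IOMMU PTE covers one DVMA page (8K, assuming DVMA_PAGE_SHIFT is 13,
 * which is what the low bit of IOMMU_ADDR_MASK implies).  Worked example,
 * not from the original source: for a physical page at 0x01a34000,
 * dvma_entry_set() below stores
 *
 *      (0x01a34000 & IOMMU_ADDR_MASK) | IOMMU_DT_VALID == 0x01a34001
 *
 * i.e. the page-frame bits plus the "valid" descriptor type. */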

static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;

#define dvma_entry_paddr(index)         (iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)   ((index << DVMA_PAGE_SHIFT) |  \
                                         (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                         (addr & IOMMU_ADDR_MASK) |    \
                                         IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                         (addr & IOMMU_ADDR_MASK) |    \
                                         IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)           (iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)           ((addr >> DVMA_PAGE_SHIFT) ^   \
                                         ((addr & 0x03c00000) >>       \
                                          (DVMA_PAGE_SHIFT+4)))
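
/* Note (not in the original source): dvma_entry_hash() XORs address bits
 * 22-25 down into bits 5-8 of the page index; nothing in the code shown
 * here actually uses it. */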

#undef DEBUG

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{
        unsigned long index;

        index = dvma_addr >> DVMA_PAGE_SHIFT;

        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
               dvma_entry_paddr(index));
}
#endif

/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
                        unsigned long vaddr, int len)
{
        pgd_t *pgd;
        unsigned long end;
        int ret = 0;

        kaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;

        end = PAGE_ALIGN(vaddr + len);

#ifdef DEBUG
        printk("dvma: mapping kern %08lx to virt %08lx\n",
               kaddr, vaddr);
#endif
        pgd = pgd_offset_k(vaddr);

        do {
                pmd_t *pmd;
                unsigned long end2;

                if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* clip this pass to the end of the current pgd entry */
                if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
                        end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
                else
                        end2 = end;

                do {
                        pte_t *pte;
                        unsigned long end3;

                        if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        /* clip to the end of the current pmd entry */
                        if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
                                end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
                        else
                                end3 = end2;

                        do {
#ifdef DEBUG
                                printk("mapping %08lx phys to %08lx\n",
                                       __pa(kaddr), vaddr);
#endif
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
                                kaddr += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        } while(vaddr < end3);

                } while(vaddr < end2);

        } while(vaddr < end);

        flush_tlb_all();

out:
        return ret;
}
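
/* Illustrative only, not part of this file: a caller is expected to pair
 * the two halves of a mapping roughly like this, with dvma_map_iommu()
 * pointing the bus at the buffer and dvma_map_cpu() giving the kernel a
 * view of the same DVMA range.  The bus-address allocator is elided and
 * dvma_btov() is assumed to come from <asm/dvma.h>. */
#if 0
static unsigned long dvma_map_sketch(unsigned long kaddr, int len)
{
        unsigned long baddr = 0;        /* would come from a DVMA allocator */
        unsigned long vaddr = dvma_btov(baddr);

        if (dvma_map_iommu(kaddr, baddr, len) < 0)
                return 0;
        if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
                dvma_unmap_iommu(baddr, len);
                return 0;
        }
        return vaddr;
}
#endif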

inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
                          int len)
{
        unsigned long end, index;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = ((baddr+len) >> DVMA_PAGE_SHIFT);

        if(len & ~DVMA_PAGE_MASK)
                end++;

        for(; index < end ; index++) {
//              if(dvma_entry_use(index))
//                      BUG();
//              printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);

                dvma_entry_set(index, __pa(kaddr));

                iommu_pte[index] |= IOMMU_FULL_BLOCK;
//              dvma_entry_inc(index);

                kaddr += DVMA_PAGE_SIZE;
        }

#ifdef DEBUG
        for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
                dvma_print(index << DVMA_PAGE_SHIFT);
#endif
        return 0;
}
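
/* Worked example for the loop bounds above (illustrative, assuming 8K DVMA
 * pages and a DVMA-page-aligned baddr, which callers are assumed to
 * guarantee): len = 0x3000 gives end = (baddr + 0x3000) >> 13 = index + 1
 * from the shift alone, and the partial-page test
 * (0x3000 & ~DVMA_PAGE_MASK) != 0 bumps end to index + 2, so both pages
 * backing the buffer get IOMMU entries. */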

void dvma_unmap_iommu(unsigned long baddr, int len)
{
        int index, end;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

        for(; index < end ; index++) {
#ifdef DEBUG
                printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
#endif
#if 0
                if(!dvma_entry_use(index))
                        printk("dvma_unmap freeing unused entry %04x\n",
                               index);
                else
                        dvma_entry_dec(index);
#endif
                dvma_entry_clr(index);
        }
}