ia64/linux-2.6.18-xen.hg

mm/truncate.c @ 871:9cbcc9008446

xen/x86: don't initialize cpu_data[]'s apicid field in generic code

AFAICT, this is not only redundant with the initialization done in
drivers/xen/core/smpboot.c, but actually results - at least for
secondary CPUs - in the Xen-specific value being written and then later
overwritten with whatever the generic code determines (with no
guarantee that the two values are identical).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author	Keir Fraser <keir.fraser@citrix.com>
date	Thu May 14 10:09:15 2009 +0100
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
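
/*
 * Worked example (illustrative, not from this file): with 4KiB pages,
 * truncating a file down to lstart == 10000 gives
 * partial == 10000 & 4095 == 1808, so truncate_partial_page() zeroes
 * bytes 1808..4095 of the page covering file offsets 8192..12287:
 *
 *	memclear_highpage_flush(page, 1808, 4096 - 1808);
 */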
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}
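
/*
 * Concrete instance of the race described above (a sketch, not code from
 * this file): CPU A sleeps in lock_page() on its way to truncating a
 * page; meanwhile reclaim on CPU B frees that page and tmpfs re-uses it
 * under swapper_space.  By the time CPU A gets the lock,
 * page->mapping != mapping, and the check at the top of
 * truncate_complete_page() makes us leave the page alone.
 */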
/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;
	if (page_count(page) != 2)	/* caller's ref + pagecache ref */
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
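
/*
 * On the page_count() check above (explanatory note, not from this file):
 * at that point the only legitimate references are our caller's (taken by
 * pagevec_lookup()) and the page cache's own.  Any third reference, e.g.
 * a concurrent read(2) that just found the page, means somebody may still
 * be using it, so invalidation fails and the page stays cached.
 */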
/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets, and zeroing out the partial page if lstart is not
 * page aligned.
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to avoid as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second
 * pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
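
/*
 * Usage sketch (hypothetical caller, not part of this file): the BUG_ON()
 * above requires lend to point at the last byte of a page.  To drop the
 * cached pages covering bytes 8192..16383 of an inode, with 4KiB pages:
 *
 *	truncate_inode_pages_range(inode->i_mapping, 8192, 16383);
 *
 * Passing lend == (loff_t)-1 means "through the end of the file", which
 * is how truncate_inode_pages() below uses this function.
 */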
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
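
/*
 * Usage sketch (a hypothetical vmtruncate()-style caller, not code from
 * this file): the caller updates i_size and unmaps the now-dead range
 * from user pagetables before ripping out the page cache:
 *
 *	i_size_write(inode, offset);
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, offset);
 */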
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}
unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
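
/*
 * Usage sketch (hypothetical, not part of this file): a "drop the clean
 * page cache" pass over one inode is simply
 *
 *	unsigned long freed = invalidate_inode_pages(inode->i_mapping);
 *
 * Dirty, locked, mapped and writeback pages are skipped, so this is safe
 * to call opportunistically; freed counts only the pages actually
 * released.
 */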
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page2(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
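
/*
 * Usage sketch (hypothetical caller, not part of this file): after a
 * direct-IO write of count bytes at pos, any cached pages over that range
 * are stale and must be shot down, or later buffered reads would return
 * old data:
 *
 *	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;
 *	int err = invalidate_inode_pages2_range(inode->i_mapping,
 *						first, last);
 *
 * err == -EIO means some page in the range was dirtied or mapped again
 * while the invalidate was running.
 */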
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
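
/*
 * Usage sketch (hypothetical, not part of this file): a network
 * filesystem that detects the server's copy of a file has changed could
 * force all cached pages to be re-read on next access:
 *
 *	int err = invalidate_inode_pages2(inode->i_mapping);
 *
 * A -EIO return means some pages were busy and survived; the caller
 * decides whether to retry or carry on with possibly stale data.
 */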