ia64/linux-2.6.18-xen.hg

view fs/ufs/balloc.c @ 524:7f8b544237bf

netfront: Allow netfront in domain 0.

This is useful if your physical network device is in a utility domain.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 15 15:18:58 2008 +0100 (2008-04-15)
parents 831230e53067
children
line source
1 /*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
9 #include <linux/fs.h>
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
21 #include "swab.h"
22 #include "util.h"
/*
 * Forward declarations for the allocation helpers defined later in
 * this file; all operate on one cylinder group at a time.
 */
static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 *
 * Sets the corresponding bits in the cylinder group's free-fragment
 * bitmap, updates the per-cg and filesystem-wide free counters, and,
 * if the whole containing block becomes free, converts the fragment
 * counts into a free-block count.  Errors are reported via
 * ufs_error()/ufs_panic(); the function itself returns nothing.
 */
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	/*
	 * NOTE(review): ufs_fragnum() yields an offset *within a block*,
	 * so s_fpb looks like the intended bound here rather than s_fpg —
	 * confirm against upstream before changing.
	 */
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);	/* cylinder group holding the fragment */
	bit = ufs_dtogd(fragment);	/* bit offset within that group */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);	/* first fragment of the containing block */
	/* drop the block's old free-fragment pattern from cg_frsum ... */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	/* mark each fragment free; a bit already set means a double free */
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	/* ... and account the new pattern */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		/* all fragments of the block are free: count it as a block */
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	/* push the cg buffers out synchronously on a sync mount */
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 *
 * Both 'fragment' and 'count' must be block-aligned.  A range that
 * crosses a cylinder-group boundary is clipped and the remainder is
 * handled by looping back to do_more.
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	/* start and length must both be whole blocks */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	/* clip at the cylinder-group boundary; the rest is freed next pass */
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* free one block per iteration and update all counters */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	/* part of the range spilled into the next cylinder group */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we suppose that oldb...oldb+count-1 blocks
 * situated at the end of file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write,
 * locked_page is argument of these functions, so we already lock it.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index = locked_page->index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	     inode->i_ino, count, oldb, newb);

	BUG_ON(!PageLocked(locked_page));

	/* walk the affected range one page at a time */
	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		/* locked_page is already locked by our caller; lock the rest */
		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			page = locked_page;

		j = i;
		head = page_buffers(page);
		bh = head;
		/* renumber each buffer that still maps an old block */
		do {
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}

			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
278 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
279 unsigned goal, unsigned count, int * err, struct page *locked_page)
280 {
281 struct super_block * sb;
282 struct ufs_sb_private_info * uspi;
283 struct ufs_super_block_first * usb1;
284 unsigned cgno, oldcount, newcount, tmp, request, result;
286 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
288 sb = inode->i_sb;
289 uspi = UFS_SB(sb)->s_uspi;
290 usb1 = ubh_get_usb_first(uspi);
291 *err = -ENOSPC;
293 lock_super (sb);
295 tmp = fs32_to_cpu(sb, *p);
296 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
297 ufs_warning (sb, "ufs_new_fragments", "internal warning"
298 " fragment %u, count %u", fragment, count);
299 count = uspi->s_fpb - ufs_fragnum(fragment);
300 }
301 oldcount = ufs_fragnum (fragment);
302 newcount = oldcount + count;
304 /*
305 * Somebody else has just allocated our fragments
306 */
307 if (oldcount) {
308 if (!tmp) {
309 ufs_error (sb, "ufs_new_fragments", "internal error, "
310 "fragment %u, tmp %u\n", fragment, tmp);
311 unlock_super (sb);
312 return (unsigned)-1;
313 }
314 if (fragment < UFS_I(inode)->i_lastfrag) {
315 UFSD("EXIT (ALREADY ALLOCATED)\n");
316 unlock_super (sb);
317 return 0;
318 }
319 }
320 else {
321 if (tmp) {
322 UFSD("EXIT (ALREADY ALLOCATED)\n");
323 unlock_super(sb);
324 return 0;
325 }
326 }
328 /*
329 * There is not enough space for user on the device
330 */
331 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
332 unlock_super (sb);
333 UFSD("EXIT (FAILED)\n");
334 return 0;
335 }
337 if (goal >= uspi->s_size)
338 goal = 0;
339 if (goal == 0)
340 cgno = ufs_inotocg (inode->i_ino);
341 else
342 cgno = ufs_dtog (goal);
344 /*
345 * allocate new fragment
346 */
347 if (oldcount == 0) {
348 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
349 if (result) {
350 *p = cpu_to_fs32(sb, result);
351 *err = 0;
352 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
353 }
354 unlock_super(sb);
355 UFSD("EXIT, result %u\n", result);
356 return result;
357 }
359 /*
360 * resize block
361 */
362 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
363 if (result) {
364 *err = 0;
365 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
366 unlock_super(sb);
367 UFSD("EXIT, result %u\n", result);
368 return result;
369 }
371 /*
372 * allocate new block and move data
373 */
374 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
375 case UFS_OPTSPACE:
376 request = newcount;
377 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
378 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
379 break;
380 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
381 break;
382 default:
383 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
385 case UFS_OPTTIME:
386 request = uspi->s_fpb;
387 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
388 (uspi->s_minfree - 2) / 100)
389 break;
390 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
391 break;
392 }
393 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
394 if (result) {
395 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
396 result, locked_page);
398 *p = cpu_to_fs32(sb, result);
399 *err = 0;
400 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
401 unlock_super(sb);
402 if (newcount < request)
403 ufs_free_fragments (inode, result + newcount, request - newcount);
404 ufs_free_fragments (inode, tmp, oldcount);
405 UFSD("EXIT, result %u\n", result);
406 return result;
407 }
409 unlock_super(sb);
410 UFSD("EXIT (FAILED)\n");
411 return 0;
412 }
414 static unsigned
415 ufs_add_fragments (struct inode * inode, unsigned fragment,
416 unsigned oldcount, unsigned newcount, int * err)
417 {
418 struct super_block * sb;
419 struct ufs_sb_private_info * uspi;
420 struct ufs_super_block_first * usb1;
421 struct ufs_cg_private_info * ucpi;
422 struct ufs_cylinder_group * ucg;
423 unsigned cgno, fragno, fragoff, count, fragsize, i;
425 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
427 sb = inode->i_sb;
428 uspi = UFS_SB(sb)->s_uspi;
429 usb1 = ubh_get_usb_first (uspi);
430 count = newcount - oldcount;
432 cgno = ufs_dtog(fragment);
433 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
434 return 0;
435 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
436 return 0;
437 ucpi = ufs_load_cylinder (sb, cgno);
438 if (!ucpi)
439 return 0;
440 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
441 if (!ufs_cg_chkmagic(sb, ucg)) {
442 ufs_panic (sb, "ufs_add_fragments",
443 "internal error, bad magic number on cg %u", cgno);
444 return 0;
445 }
447 fragno = ufs_dtogd (fragment);
448 fragoff = ufs_fragnum (fragno);
449 for (i = oldcount; i < newcount; i++)
450 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
451 return 0;
452 /*
453 * Block can be extended
454 */
455 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
456 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
457 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
458 break;
459 fragsize = i - oldcount;
460 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
461 ufs_panic (sb, "ufs_add_fragments",
462 "internal error or corrupted bitmap on cg %u", cgno);
463 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
464 if (fragsize != count)
465 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
466 for (i = oldcount; i < newcount; i++)
467 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
468 if(DQUOT_ALLOC_BLOCK(inode, count)) {
469 *err = -EDQUOT;
470 return 0;
471 }
473 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
474 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
475 uspi->cs_total.cs_nffree -= count;
477 ubh_mark_buffer_dirty (USPI_UBH(uspi));
478 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
479 if (sb->s_flags & MS_SYNCHRONOUS) {
480 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
481 ubh_wait_on_buffer (UCPI_UBH(ucpi));
482 }
483 sb->s_dirt = 1;
485 UFSD("EXIT, fragment %u\n", fragment);
487 return fragment;
488 }
/*
 * Jump to cg_found if cylinder group 'cgno' can satisfy a 'count'-
 * fragment request: either a whole free block exists, or cg_frsum
 * records a free run of at least 'count' fragments.  Relies on 'sb',
 * 'uspi', 'ucg', 'cgno', 'count' and 'k' from the enclosing scope.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
/*
 * Allocate 'count' fragments, preferring cylinder group 'cgno' near
 * 'goal'.  Returns the filesystem-wide fragment number of the first
 * allocated fragment, or 0 on failure ('err' gets -EDQUOT on quota
 * failure).  Search order: preferred cg, quadratic rehash, then brute
 * force over all cylinder groups.
 */
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
				     unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* exactly a block: allocate one directly */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succed;
	}

	/* find the smallest free-run size >= count recorded in cg_frsum */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	/* no suitable run: take a whole block and free the unused tail */
	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		/* ufs_alloccg_block charged a whole block; refund the tail */
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	/*
	 * NOTE(review): quota is charged here but the bitmap/frsum updates
	 * below are skipped on failure, while ufs_bitmap_search has already
	 * advanced c_frotor — harmless, but confirm intended.
	 */
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	/* the run we carved from shrinks; record the leftover size */
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	/* convert the cg-relative result to a filesystem-wide fragment number */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %u\n", result);
	return result;
}
/*
 * Allocate one whole block in the given cylinder group, near 'goal'
 * if possible.  Returns the cg-relative fragment number of the block,
 * or (unsigned)-1 on failure ('err' gets -EDQUOT on quota failure).
 */
static unsigned ufs_alloccg_block (struct inode * inode,
				   struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD("ENTER, goal %u\n", goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* no preference: continue from the per-cg rotor */
	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	/*
	 * NOTE(review): quota failure here returns after the block bitmap
	 * and cluster accounting were already updated, leaving the cg
	 * inconsistent — same ordering issue as ufs_add_fragments; verify
	 * against later upstream fixes before reordering.
	 */
	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD("EXIT, result %u\n", result);

	return result;
}
/*
 * Scan 'size' bytes of the (possibly multi-buffer) ubh starting at
 * byte 'begin' for the first byte whose 'table' entry has any bit of
 * 'mask' set.  Returns the number of bytes left from the match to the
 * end of the scanned range (counting the matching byte), or 0 if no
 * byte matched.  Callers recover the match position as
 * (start + length - returned).
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	/* split 'begin' into a buffer index and an offset inside it */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* bytes to scan in the current buffer */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* stop on a matching byte or when this buffer is exhausted */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sp: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want find new one
 * @count: specified size
 *
 * Returns the cg-relative fragment number of the found run, or
 * (unsigned)-1 if the summary information disagrees with the bitmap.
 */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;

	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* byte-granular starting point: near goal, or the cg's rotor */
	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	/* scan [start, end of bitmap) using the fragment table as a filter */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
			1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		/* nothing after 'start': wrap around and scan the front part */
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			/* summary said there is space, bitmap says otherwise */
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	/* convert "bytes remaining" back into a fragment index */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	/* locate the exact run inside that byte's blocks */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return (unsigned)-1;
}
/*
 * Update the cluster bitmap and cluster-size summary counts after one
 * block at 'blkno' is allocated (cnt == -1) or freed (cnt == +1).
 * No-op unless the filesystem keeps cluster summaries
 * (s_contigsumsize > 0).
 */
static void ufs_clusteracct(struct super_block * sb,
			    struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	/* in the cluster bitmap, a set bit means the block is free */
	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	/* summary entries are 32-bit counters indexed by cluster length */
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
/*
 * Lookup table for 8-fragments-per-block filesystems: indexed by one
 * byte of the free-fragment bitmap; used by ubh_scanc() with a mask of
 * run lengths to test whether the byte contains a suitable free run.
 * (Presumably each set bit encodes an available run length, as in the
 * BSD fragtbl tables — confirm against ffs_tables.c.)
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
/*
 * Companion lookup table used by ubh_scanc() when the filesystem has
 * other than 8 fragments per block (selected in ufs_bitmap_search by
 * uspi->s_fpb != 8; the scan mask is shifted by (s_fpb & 7) to pick
 * the relevant bits).
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};