/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
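
/*
 * A quick orientation, assumed throughout this file: a UFS "block" is
 * made up of uspi->s_fpb fragments (for example, eight 1 KiB fragments
 * in an 8 KiB block).  Each cylinder group carries a free-fragment
 * bitmap plus per-run-size summary counters (cg_frsum), and the
 * allocators below keep the two in lockstep.
 */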

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super (sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}
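
	/*
	 * The two ufs_fragacct() calls below bracket the bitmap update:
	 * the first (-1) retracts the old free-run counts of this block
	 * from cg_frsum, and once the freed bits are set the second (+1)
	 * re-adds the new counts, keeping the per-size summary accurate.
	 */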
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}

	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
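
	/*
	 * If the range spills past this cylinder group, only the part
	 * inside the group is freed on this pass; the remainder is picked
	 * up by looping back to do_more once this group is done.
	 */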
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache in the following way:
 * blocks that have b_blocknr equal to oldb...oldb+count-1
 * become blocks with b_blocknr equal to newb...newb+count-1.
 * We also assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
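/*
 * A worked example of the page walk below (illustrative numbers, not
 * from the original): with 4 KiB pages and 1 KiB blocks, blk_per_page
 * is 4, so relocating count == 8 blocks visits two page-cache pages,
 * and within each page the buffer ring is scanned for buffers whose
 * b_blocknr still points into the old range.
 */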
static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	     inode->i_ino, count, oldb, newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;

	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		j = i;
		do {
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}

			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
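
/*
 * ufs_new_fragments() takes one of three paths:
 * 1) nothing is allocated at *p yet - allocate 'count' fresh fragments;
 * 2) the existing tail can grow in place - ufs_add_fragments();
 * 3) otherwise allocate a new area (its size chosen by the fs_optim
 *    policy), move the old fragments there with ufs_change_blocknr(),
 *    and free the old location.
 */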
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
			   unsigned goal, unsigned count, int * err, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount, tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			     " fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				   "fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
			ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
					locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
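	/*
	 * Policy note (summarizing the switch below): UFS_OPTSPACE asks
	 * for exactly newcount fragments to keep waste low, while
	 * UFS_OPTTIME asks for a whole block (uspi->s_fpb fragments) to
	 * keep future relocations rare; fs_optim is flipped based on how
	 * the free-fragment count compares with the minfree reserve.
	 */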
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
				   result, locked_page);

		*p = cpu_to_fs32(sb, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
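
/*
 * Try to extend an existing run of fragments in place: this succeeds
 * only if the fragments immediately past the current tail are still
 * free within the same block; on success the original 'fragment' is
 * returned, otherwise 0.
 */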
static unsigned
ufs_add_fragments (struct inode * inode, unsigned fragment,
		   unsigned oldcount, unsigned newcount, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
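	/*
	 * Bookkeeping sketch for the code below: the extension consumes
	 * 'count' fragments from the head of a free run of length
	 * 'fragsize', so cg_frsum[fragsize] is decremented and, if a
	 * shorter run of fragsize - count fragments survives, that
	 * bucket is incremented in its place.
	 */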
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %u\n", fragment);

	return fragment;
}

#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
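
/*
 * The macro above jumps to cg_found as soon as the candidate cylinder
 * group can satisfy the request: either it holds a whole free block
 * (cs_nbfree), or its run-size summary cg_frsum records at least one
 * free run of 'count' fragments or more.
 */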
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
	unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. search in the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
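	/*
	 * The rehash probes groups at offsets 1, 3, 7, 15, ... from the
	 * preferred group (cgno advances by j while j doubles), wrapping
	 * modulo the number of cylinder groups.
	 */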
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %u\n", result);
	return result;
}

static unsigned ufs_alloccg_block (struct inode * inode,
	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD("ENTER, goal %u\n", goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD("EXIT, result %u\n", result);

	return result;
}
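
/*
 * ubh_scanc() below is the byte-granular first pass of the bitmap
 * search: it runs each bitmap byte through a fragment table and stops
 * at the first byte whose table entry matches 'mask'.  Its return
 * value counts the bytes left unscanned (including the matching one),
 * so 0 means "not found" and the caller recovers the byte offset as
 * start + length - loc.
 */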
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		offset = 0;
		begin++;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
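	/*
	 * Worked example (derived from the tables below): for count == 3,
	 * mask_arr[3] == 0x1f (0b11111) and want_arr[3] == 0xe (0b01110),
	 * i.e. a run of exactly three free fragments bounded by in-use
	 * ones; the map is pre-shifted left one bit so that a run at the
	 * very start of a block still sees a zero guard bit below it.
	 */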
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;

	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
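	/*
	 * Refinement pass over the located byte: the loop below walks its
	 * eight fragments one block at a time and slides the mask/want
	 * pattern across each block map, returning the first position
	 * where an exact-size free run appears.
	 */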
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return (unsigned)-1;
}
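
/*
 * ufs_clusteracct() maintains the 44BSD contiguous-cluster summary:
 * the cluster bitmap has one bit per block (set when free), and the
 * array at c_clustersumoff counts free clusters by length.  Setting or
 * clearing a block's bit merges it with, or splits it from, the free
 * clusters on either side, so one counter is adjusted for the combined
 * cluster and the counters for the old forward/backward pieces are
 * backed out.
 */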
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for the old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
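
/*
 * How to read the tables below (an interpretation of the encoding, not
 * spelled out in the original): indexing a table with a bitmap byte
 * yields a bit set for each free-run length present in that byte.  For
 * ufs_fragtable_8fpb, bit k-1 means "a maximal run of exactly k free
 * fragments", e.g. ufs_fragtable_8fpb[0x0b] == 0x03 because 0b00001011
 * contains runs of length 2 and 1.  ufs_fragtable_other packs several
 * sub-byte block geometries into one table, which is why ubh_scanc()
 * is called with the mask 1 << (count - 1 + (uspi->s_fpb & 7)).
 */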
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};