/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/gfp.h>
13 #include <linux/prefetch.h>
/* PAGE_OFS() - byte offset of @ofs within its containing page. */
15 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
/*
 * sync_request - synchronously transfer one page to/from a block device.
 * @page: page to transfer; the device sector is derived from page->index
 * @bdev: target block device
 * @op:   request operation passed to bio_set_op_attrs() (read or write)
 *
 * Builds a single-segment bio on the stack and blocks in
 * submit_bio_wait(), returning its result (0 or negative errno).
 *
 * NOTE(review): this excerpt appears to be missing lines -- at least the
 * on-stack 'struct bio bio;' declaration and presumably a
 * 'bio.bi_bdev = bdev;' assignment. Confirm against the full file.
 */
17 static int sync_request(struct page *page, struct block_device *bdev, int op)
20 struct bio_vec bio_vec;
22 bio_init(&bio, &bio_vec, 1);
24 bio_add_page(&bio, page, PAGE_SIZE, 0);
25 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
26 bio_set_op_attrs(&bio, op, 0);
28 return submit_bio_wait(&bio);
/*
 * bdev_readpage - ->readpage device op for block devices.
 * @_sb:  opaque cookie, actually the struct super_block pointer
 * @page: page-cache page to fill
 *
 * Reads the page synchronously via sync_request() and sets/clears the
 * page's uptodate flag depending on the outcome.
 *
 * NOTE(review): excerpt drops the 'int err;' declaration, the
 * error-branch structure, unlock_page() and the final return -- verify
 * against the complete source.
 */
31 static int bdev_readpage(void *_sb, struct page *page)
33 struct super_block *sb = _sb;
34 struct block_device *bdev = logfs_super(sb)->s_bdev;
37 err = sync_request(page, bdev, READ);
39 ClearPageUptodate(page);
42 SetPageUptodate(page);
/*
 * Wait queue used to block until all in-flight writes have completed;
 * the I/O completion handlers below wake it when the per-superblock
 * pending-write count drops to zero (see bdev_sync()).
 */
49 static DECLARE_WAIT_QUEUE_HEAD(wq);
/*
 * writeseg_end_io - bio completion callback for segment writes.
 *
 * BUGs on any I/O error (the FIXME notes a retry/relocate path was never
 * implemented), then ends writeback and drops the reference on every
 * page in the bio. When the superblock's pending-write count reaches
 * zero, sleepers on 'wq' are (presumably) woken -- the wake_up() line,
 * the bvec/i declarations and bio_put() are missing from this excerpt.
 */
51 static void writeseg_end_io(struct bio *bio)
55 struct super_block *sb = bio->bi_private;
56 struct logfs_super *super = logfs_super(sb);
58 BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
60 bio_for_each_segment_all(bvec, bio, i) {
61 end_page_writeback(bvec->bv_page);
62 put_page(bvec->bv_page);
65 if (atomic_dec_and_test(&super->s_pending_writes))
/*
 * __bdev_writeseg - write @nr_pages pages of a segment to disk.
 * @sb:    the logfs superblock
 * @ofs:   byte offset on the device where the write starts
 * @index: page-cache index (in the mapping inode) of the first page
 *
 * Pulls pages out of the mapping inode's page cache, packs them into
 * bios and submits them asynchronously; writeseg_end_io() completes
 * them. Bios are capped at BIO_MAX_PAGES segments because, as the
 * in-line comment says, the block layer cannot split these bios, so a
 * larger range is issued as several consecutive bios.
 *
 * NOTE(review): this excerpt is heavily gapped -- the loop structure,
 * the "bio full, submit and start a new one" branch, submit_bio()
 * calls, unlock_page()/error handling and the function tail are all
 * missing. The duplicated atomic_inc(&super->s_pending_writes) lines
 * below are the per-bio accounting from the mid-loop and final submit
 * paths respectively.
 */
69 static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
72 struct logfs_super *super = logfs_super(sb);
73 struct address_space *mapping = super->s_mapping_inode->i_mapping;
74 struct bio *bio = NULL;
76 unsigned int max_pages;
79 max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);
81 for (i = 0; i < nr_pages; i++) {
83 bio = bio_alloc(GFP_NOFS, max_pages);
86 bio->bi_bdev = super->s_bdev;
87 bio->bi_iter.bi_sector = ofs >> 9;
89 bio->bi_end_io = writeseg_end_io;
90 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
92 page = find_lock_page(mapping, index + i);
94 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
96 BUG_ON(PageWriteback(page));
97 set_page_writeback(page);
101 /* Block layer cannot split bios :( */
102 ofs += bio->bi_iter.bi_size;
103 atomic_inc(&super->s_pending_writes);
110 atomic_inc(&super->s_pending_writes);
/*
 * bdev_writeseg - ->writeseg device op: page-align and write a range.
 * @sb:  the logfs superblock (must not be read-only -- BUG otherwise)
 * @ofs: byte offset on the device
 * @len: number of bytes to write
 *
 * Rounds @len up to a whole number of pages and hands off to
 * __bdev_writeseg(). The truncated comment below suggests a zero-length
 * early-return exists for the "object fit perfectly into a segment"
 * case; that branch and the 'head' alignment handling are only
 * partially visible in this excerpt.
 */
116 static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
118 struct logfs_super *super = logfs_super(sb);
121 BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);
124 /* This can happen when the object fit perfectly into a
125 * segment, the segment gets written per sync and subsequently
130 head = ofs & (PAGE_SIZE - 1);
135 len = PAGE_ALIGN(len);
136 __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
/*
 * erase_end_io - bio completion callback for erase writes (do_erase()).
 *
 * BUGs on I/O error (no retry path, per the FIXME) and on an empty bio,
 * then decrements the pending-write count; when it hits zero, waiters
 * on 'wq' are presumably woken -- the wake_up() and bio_put() lines are
 * missing from this excerpt.
 */
140 static void erase_end_io(struct bio *bio)
142 struct super_block *sb = bio->bi_private;
143 struct logfs_super *super = logfs_super(sb);
145 BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
146 BUG_ON(bio->bi_vcnt == 0);
148 if (atomic_dec_and_test(&super->s_pending_writes))
/*
 * do_erase - overwrite @nr_pages pages starting at @ofs with erase data.
 * @sb:    the logfs superblock
 * @ofs:   byte offset on the device
 * @index: page index of the start of the range (unused in visible lines)
 *
 * Builds bios by filling bi_io_vec[] directly, pointing every segment at
 * the single shared super->s_erase_page, so the same page contents are
 * written over the whole range. Like __bdev_writeseg(), bios are capped
 * at BIO_MAX_PAGES ("block layer cannot split bios"), so long ranges are
 * issued as several bios; completion is handled by erase_end_io().
 *
 * NOTE(review): the submit_bio() calls, the end of the chunking branch
 * and the function tail are missing from this excerpt -- confirm the
 * loop restart logic against the full file.
 */
152 static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
155 struct logfs_super *super = logfs_super(sb);
157 unsigned int max_pages;
160 max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);
162 bio = bio_alloc(GFP_NOFS, max_pages);
165 for (i = 0; i < nr_pages; i++) {
166 if (i >= max_pages) {
167 /* Block layer cannot split bios :( */
/* Flush the bio filled so far, then start a fresh one below. */
169 bio->bi_iter.bi_size = i * PAGE_SIZE;
170 bio->bi_bdev = super->s_bdev;
171 bio->bi_iter.bi_sector = ofs >> 9;
172 bio->bi_private = sb;
173 bio->bi_end_io = erase_end_io;
174 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
175 atomic_inc(&super->s_pending_writes);
178 ofs += i * PAGE_SIZE;
183 bio = bio_alloc(GFP_NOFS, max_pages);
/* Every segment points at the one shared erase page. */
186 bio->bi_io_vec[i].bv_page = super->s_erase_page;
187 bio->bi_io_vec[i].bv_len = PAGE_SIZE;
188 bio->bi_io_vec[i].bv_offset = 0;
190 bio->bi_vcnt = nr_pages;
191 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
192 bio->bi_bdev = super->s_bdev;
193 bio->bi_iter.bi_sector = ofs >> 9;
194 bio->bi_private = sb;
195 bio->bi_end_io = erase_end_io;
196 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
197 atomic_inc(&super->s_pending_writes);
/*
 * bdev_erase - ->erase device op for block devices.
 * @sb:  the logfs superblock
 * @to:  byte offset to erase from (must be page-aligned)
 * @len: number of bytes (must be page-aligned)
 *
 * Block devices have no real erase, so the range is overwritten via
 * do_erase(). Skipped (presumably returning an error such as -EROFS --
 * the return line is missing from this excerpt) when the filesystem is
 * read-only.
 */
202 static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
205 struct logfs_super *super = logfs_super(sb);
207 BUG_ON(to & (PAGE_SIZE - 1));
208 BUG_ON(len & (PAGE_SIZE - 1));
210 if (super->s_flags & LOGFS_SB_FLAG_RO)
215 * Object store doesn't care whether erases happen or not.
216 * But for the journal they are required. Otherwise a scan
217 * can find an old commit entry and assume it is the current
218 * one, travelling back in time.
220 do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
/*
 * bdev_sync - ->sync device op: block until every pending write issued
 * by __bdev_writeseg()/do_erase() has completed (the completion
 * handlers wake 'wq' when s_pending_writes reaches zero).
 */
226 static void bdev_sync(struct super_block *sb)
228 struct logfs_super *super = logfs_super(sb);
230 wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
/*
 * bdev_find_first_sb - locate the first superblock copy (device start).
 * @sb:  the logfs superblock
 * @ofs: out: device offset of the superblock (the '*ofs = 0;'
 *       assignment appears to be missing from this excerpt)
 *
 * Reads page 0 of the mapping inode through the page cache, using
 * bdev_readpage() as the filler.
 */
233 static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
235 struct logfs_super *super = logfs_super(sb);
236 struct address_space *mapping = super->s_mapping_inode->i_mapping;
237 filler_t *filler = bdev_readpage;
240 return read_cache_page(mapping, 0, filler, sb);
/*
 * bdev_find_last_sb - locate the last superblock copy (device end).
 * @sb:  the logfs superblock
 * @ofs: out: device offset of the superblock (the '*ofs = pos;'
 *       assignment appears to be missing from this excerpt)
 *
 * Computes the last 4KiB-aligned position on the device from the bdev
 * inode size and reads that page through the page cache with
 * bdev_readpage() as the filler.
 */
243 static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
245 struct logfs_super *super = logfs_super(sb);
246 struct address_space *mapping = super->s_mapping_inode->i_mapping;
247 filler_t *filler = bdev_readpage;
248 u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
249 pgoff_t index = pos >> PAGE_SHIFT;
252 return read_cache_page(mapping, index, filler, sb);
/*
 * bdev_write_sb - ->write_sb device op: synchronously write the
 * superblock page with sync_request(); returns its 0/-errno result.
 */
255 static int bdev_write_sb(struct super_block *sb, struct page *page)
257 struct block_device *bdev = logfs_super(sb)->s_bdev;
259 /* Nothing special to do for block devices. */
260 return sync_request(page, bdev, WRITE);
/*
 * bdev_put_device - release the block device taken exclusively at mount
 * time (matches the blkdev_get_by_path() in logfs_get_sb_bdev()).
 */
263 static void bdev_put_device(struct logfs_super *s)
265 blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
/*
 * bdev_can_write_buf - ->can_write_buf device op.
 * NOTE(review): the function body is entirely missing from this
 * excerpt; presumably it returns 0 (block devices never buffer partial
 * segment writes) -- confirm against the full file.
 */
268 static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
/*
 * Device-operation vtable wired into the superblock by
 * logfs_get_sb_bdev(). NOTE(review): the embedded numbering shows gaps
 * where additional entries (likely .erase and .sync, defined above)
 * have been dropped from this excerpt.
 */
273 static const struct logfs_device_ops bd_devops = {
274 .find_first_sb = bdev_find_first_sb,
275 .find_last_sb = bdev_find_last_sb,
276 .write_sb = bdev_write_sb,
277 .readpage = bdev_readpage,
278 .writeseg = bdev_writeseg,
280 .can_write_buf = bdev_can_write_buf,
282 .put_device = bdev_put_device,
285 int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
288 struct block_device *bdev;
290 bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
293 return PTR_ERR(bdev);
295 if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
296 int mtdnr = MINOR(bdev->bd_dev);
297 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
298 return logfs_get_sb_mtd(p, mtdnr);
303 p->s_devops = &bd_devops;