gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
error = gfs2_quota_lock_check(ip);
if (error)
goto out_unlock;
requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, requested, 0);
+ ap.target = requested;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_qunlock;
}
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_alloc_parms ap = { .target = 1, };
struct buffer_head *dibh;
int error;
int unstuff = 0;
if (error)
return error;
- error = gfs2_inplace_reserve(ip, 1, 0);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto do_grow_qunlock;
unstuff = 1;
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
int ret;
u64 oldsize;
inode_dio_wait(inode);
- ret = gfs2_rs_alloc(GFS2_I(inode));
+ ret = gfs2_rs_alloc(ip);
if (ret)
goto out;
goto out;
}
+ gfs2_rs_deltree(ip->i_res);
ret = do_shrink(inode, oldsize, newsize);
out:
put_write_access(inode);
struct inode *inode = file_inode(vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
if (ret)
goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+ ap.target = data_blocks + ind_blocks;
+ ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
goto out_quota_unlock;
if (!(file->f_mode & FMODE_WRITE))
return 0;
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, &inode->i_writecount);
return 0;
}
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
int error;
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+ ap.target = data_blocks + ind_blocks;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
+#include <linux/lockref.h>
#include "gfs2.h"
#include "incore.h"
*
*/
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
{
- GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
- atomic_inc(&gl->gl_ref);
+ GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+ lockref_get(&gl->gl_lockref);
}
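With the glock now embedding a struct lockref, lockref_get() first attempts a lockless cmpxchg increment of the count and only falls back to the spinlock when that fails. A minimal sketch of what the fallback amounts to, using only the fields shown above (the real fast path lives in lib/lockref.c):

	/* Slow-path equivalent of lockref_get(&gl->gl_lockref): */
	spin_lock(&gl->gl_lockref.lock);
	gl->gl_lockref.count++;
	spin_unlock(&gl->gl_lockref.lock);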
/**
spin_unlock(&lru_lock);
}
-/**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
- if (atomic_dec_and_test(&gl->gl_ref))
- GLOCK_BUG_ON(gl, 1);
-}
-
/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
struct gfs2_sbd *sdp = gl->gl_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
- if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
- __gfs2_glock_remove_from_lru(gl);
- spin_unlock(&lru_lock);
- spin_lock_bucket(gl->gl_hash);
- hlist_bl_del_rcu(&gl->gl_list);
- spin_unlock_bucket(gl->gl_hash);
- GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
- GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
- trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
- }
+ if (lockref_put_or_lock(&gl->gl_lockref))
+ return;
+
+ lockref_mark_dead(&gl->gl_lockref);
+
+ spin_lock(&lru_lock);
+ __gfs2_glock_remove_from_lru(gl);
+ spin_unlock(&lru_lock);
+ spin_unlock(&gl->gl_lockref.lock);
+ spin_lock_bucket(gl->gl_hash);
+ hlist_bl_del_rcu(&gl->gl_list);
+ spin_unlock_bucket(gl->gl_hash);
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+ trace_gfs2_glock_put(gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
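The put side follows the standard lockref teardown pattern: lockref_put_or_lock() either drops a reference locklessly (the count was greater than one) or returns with the spinlock held and the count still at one, at which point the glock is marked dead so concurrent lockref_get_not_dead() callers fail. A generic sketch of that pattern, with obj and free_object() as hypothetical stand-ins:

	if (lockref_put_or_lock(&obj->ref))
		return;				/* count was > 1, now dropped */
	/* obj->ref.lock is held here and count == 1: last reference */
	lockref_mark_dead(&obj->ref);		/* sets count to -128 */
	/* ... unhash obj so no new lookup can find it ... */
	spin_unlock(&obj->ref.lock);
	free_object(obj);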
/**
continue;
if (gl->gl_sbd != sdp)
continue;
- if (atomic_inc_not_zero(&gl->gl_ref))
+ if (lockref_get_not_dead(&gl->gl_lockref))
return gl;
}
held2 = (new_state != LM_ST_UNLOCKED);
if (held1 != held2) {
+ GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
if (held2)
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
else
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
}
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
return;
out_unlock:
gl->gl_sbd = sdp;
gl->gl_flags = 0;
gl->gl_name = name;
- atomic_set(&gl->gl_ref, 1);
+ gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
}
}
- spin_unlock(&gl->gl_spin);
+ gl->gl_lockref.count++;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- smp_wmb();
- gfs2_glock_hold(gl);
+ spin_unlock(&gl->gl_spin);
+
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
+ if (!spin_trylock(&gl->gl_spin)) {
+ list_add(&gl->gl_lru, &lru_list);
+ atomic_inc(&lru_count);
+ continue;
+ }
clear_bit(GLF_LRU, &gl->gl_flags);
- gfs2_glock_hold(gl);
spin_unlock(&lru_lock);
- spin_lock(&gl->gl_spin);
+ gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
spin_unlock(&gl->gl_spin);
spin_lock(&lru_lock);
}
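The trylock here is not optional: with gl_spin now aliased to gl_lockref.lock, gfs2_glock_put() takes that lock before lru_lock, so the disposer (which already holds lru_lock) would risk an inversion by blocking on gl_spin. On contention the glock is simply requeued to the LRU for a later pass.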
rcu_read_lock();
hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
- if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+ if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
}
rcu_read_unlock();
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
- atomic_read(&gl->gl_ref), gl->gl_hold_time);
+ (int)gl->gl_lockref.count, gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
error = dump_holder(seq, gh);
gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
- } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+ } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));
return 0;
}
extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
unsigned flags, struct gfs2_holder *gh);
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
}
}
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
+#include <linux/lockref.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
u32 bi_offset;
u32 bi_start;
u32 bi_len;
+ u32 bi_blocks;
};
struct gfs2_rgrpd {
struct gfs2_rbm {
struct gfs2_rgrpd *rgd;
- struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
u32 offset; /* The offset is bitmap relative */
+ int bii; /* Bitmap index */
};
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+ return rbm->rgd->rd_bits + rbm->bii;
+}
+
static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
- return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+ return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+ rbm->offset;
}
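Since GFS2_NBBY is 4 (each bitmap byte holds four two-bit block states), the conversion is just rd_data0 + bi_start * 4 + offset. For example, a bitmap starting at byte 0x1000 of the rgrp's bitmap area with a bitmap-relative offset of 10 maps to block rd_data0 + 0x4000 + 10.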
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
const struct gfs2_rbm *rbm2)
{
- return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
+ return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
(rbm1->offset == rbm2->offset);
}
unsigned int rs_qa_qd_num;
};
+/*
+ * Allocation parameters
+ * @target: The number of blocks we'd ideally like to allocate
+ * @aflags: The flags (e.g. Orlov flag)
+ *
+ * The intent is to gradually expand this structure over time in
+ * order to give more information, e.g. alignment, min extent size
+ * to the allocation code.
+ */
+struct gfs2_alloc_parms {
+ u32 target;
+ u32 aflags;
+};
+
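Callers build the parameter block on the stack, as the hunks throughout this series do. A minimal sketch of the intended pattern (identifiers as used at the call sites above):

	struct gfs2_alloc_parms ap = { .target = data_blocks + ind_blocks,
				       .aflags = 0, };

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_qunlock;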
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
struct gfs2_sbd *gl_sbd;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
- atomic_t gl_ref;
- spinlock_t gl_spin;
+ struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
/* State fields protected by gl_spin */
unsigned int gl_state:2, /* Current state */
unsigned int gt_logd_secs;
- unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
struct list_head sd_quota_list;
atomic_t sd_quota_count;
struct mutex sd_quota_mutex;
+ struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
int error;
int dblocks = 1;
if (error)
goto out;
- error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_quota;
struct gfs2_inode *ip, int arq)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
int error;
if (arq) {
if (error)
goto fail_quota_locks;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(dip, &ap);
if (error)
goto fail_quota_locks;
if (!IS_ERR(inode)) {
d = d_splice_alias(inode, dentry);
error = 0;
- if (file && !IS_ERR(d)) {
- if (d == NULL)
- d = dentry;
- if (S_ISREG(inode->i_mode))
- error = finish_open(file, d, gfs2_open_common, opened);
- else
+ if (file) {
+ if (S_ISREG(inode->i_mode)) {
+ WARN_ON(d != NULL);
+ error = finish_open(file, dentry, gfs2_open_common, opened);
+ } else {
error = finish_no_open(file, d);
+ }
+ } else {
+ dput(d);
}
gfs2_glock_dq_uninit(ghs);
- if (IS_ERR(d))
- return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
fail_free_inode:
if (ip->i_gl)
gfs2_glock_put(ip->i_gl);
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, NULL);
free_inode_nonrcu(inode);
inode = NULL;
fail_gunlock:
error = finish_open(file, dentry, gfs2_open_common, opened);
gfs2_glock_dq_uninit(&gh);
- if (error)
+ if (error) {
+ dput(d);
return ERR_PTR(error);
+ }
return d;
}
error = 0;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(dip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(dip, &ap);
if (error)
goto out_gunlock_q;
d = __gfs2_lookup(dir, dentry, file, opened);
if (IS_ERR(d))
return PTR_ERR(d);
- if (d == NULL)
- d = dentry;
- if (d->d_inode) {
+ if (d != NULL)
+ dentry = d;
+ if (dentry->d_inode) {
if (!(*opened & FILE_OPENED))
- return finish_no_open(file, d);
+ return finish_no_open(file, dentry);
+ dput(d);
return 0;
}
+ BUG_ON(d != NULL);
if (!(flags & O_CREAT))
return -ENOENT;
goto out_gunlock;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(ndip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(ndip, &ap);
if (error)
goto out_gunlock_q;
{
spin_lock_init(&gt->gt_spin);
- gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
INIT_LIST_HEAD(&sdp->sd_quota_list);
mutex_init(&sdp->sd_quota_mutex);
+ mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
spin_unlock(&qd_lru_lock);
}
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+ unsigned int bit, int new_value)
+{
+ unsigned int c, o, b = bit;
+ int old_value;
+
+ c = b / (8 * PAGE_SIZE);
+ b %= 8 * PAGE_SIZE;
+ o = b / 8;
+ b %= 8;
+
+ old_value = (bitmap[c][o] & (1 << b));
+ gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+ if (new_value)
+ bitmap[c][o] |= 1 << b;
+ else
+ bitmap[c][o] &= ~(1 << b);
+}
+
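The index arithmetic splits a global bit number into a page (c), a byte within that page (o), and a bit within that byte (b). Assuming 4096-byte pages (32768 bits per page), bit 70000 yields c = 2, then b = 70000 - 65536 = 4464, so o = 558 and b = 0: page 2, byte 558, bit 0.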
static void slot_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
mutex_unlock(&sdp->sd_quota_mutex);
}
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ u64 *sync_gen)
+{
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+ (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ return 0;
+
+ list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+
+ set_bit(QDF_LOCKED, &qd->qd_flags);
+ gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
+ qd->qd_change_sync = qd->qd_change;
+ gfs2_assert_warn(sdp, qd->qd_slot_count);
+ qd->qd_slot_count++;
+ return 1;
+}
+
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL;
spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags) ||
- qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
- continue;
-
- list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
- set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
- atomic_inc(&qd->qd_count);
- qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
- found = 1;
-
- break;
+ found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+ if (found)
+ break;
}
if (!found)
return 0;
}
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
- if (sdp->sd_vfs->s_flags & MS_RDONLY)
- return 0;
-
- spin_lock(&qd_lru_lock);
-
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags)) {
- spin_unlock(&qd_lru_lock);
- return 0;
- }
-
- list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
- set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
- atomic_inc(&qd->qd_count);
- qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
-
- spin_unlock(&qd_lru_lock);
-
- gfs2_assert_warn(sdp, qd->qd_change_sync);
- if (bh_get(qd)) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
- return 0;
- }
-
- return 1;
-}
-
static void qd_unlock(struct gfs2_quota_data *qd)
{
gfs2_assert_warn(qd->qd_gl->gl_sbd,
{
struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks, ind_blocks;
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
reserved = 1 + (nalloc * (data_blocks + ind_blocks));
- error = gfs2_inplace_reserve(ip, reserved, 0);
+ ap.target = reserved;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_alloc;
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
+ int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
sync = need_sync(qd);
gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+ if (!sync)
+ continue;
+
+ spin_lock(&qd_lru_lock);
+ found = qd_check_sync(sdp, qd, NULL);
+ spin_unlock(&qd_lru_lock);
- if (sync && qd_trylock(qd))
- qda[count++] = qd;
+ if (!found)
+ continue;
+
+ gfs2_assert_warn(sdp, qd->qd_change_sync);
+ if (bh_get(qd)) {
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
+ continue;
+ }
+
+ qda[count++] = qd;
}
if (count) {
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
- unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+ unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
unsigned int num_qd;
unsigned int x;
int error = 0;
- sdp->sd_quota_sync_gen++;
-
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
if (!qda)
return -ENOMEM;
+ mutex_lock(&sdp->sd_quota_sync_mutex);
+ sdp->sd_quota_sync_gen++;
+
do {
num_qd = 0;
}
} while (!error && num_qd == max_qd);
+ mutex_unlock(&sdp->sd_quota_sync_mutex);
kfree(qda);
return error;
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, blocks, 0);
+ ap.target = blocks;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_i;
blocks += gfs2_rg_blocks(ip, blocks);
unsigned char new_state)
{
unsigned char *byte1, *byte2, *end, cur_state;
- unsigned int buflen = rbm->bi->bi_len;
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ unsigned int buflen = bi->bi_len;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
- byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
- end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+ byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ end = bi->bi_bh->b_data + bi->bi_offset + buflen;
BUG_ON(byte1 >= end);
printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
"new_state=%d\n", rbm->offset, cur_state, new_state);
printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
- (unsigned long long)rbm->rgd->rd_addr,
- rbm->bi->bi_start);
+ (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
- rbm->bi->bi_offset, rbm->bi->bi_len);
+ bi->bi_offset, bi->bi_len);
dump_stack();
gfs2_consist_rgrpd(rbm->rgd);
return;
}
*byte1 ^= (cur_state ^ new_state) << bit;
- if (do_clone && rbm->bi->bi_clone) {
- byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ if (do_clone && bi->bi_clone) {
+ byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
*byte2 ^= (cur_state ^ new_state) << bit;
}
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
- const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
const u8 *byte;
unsigned int bit;
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
u64 rblock = block - rbm->rgd->rd_data0;
- u32 x;
if (WARN_ON_ONCE(rblock > UINT_MAX))
return -EINVAL;
if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
return -E2BIG;
- rbm->bi = rbm->rgd->rd_bits;
+ rbm->bii = 0;
rbm->offset = (u32)(rblock);
/* Check if the block is within the first block */
- if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+ if (rbm->offset < rbm_bi(rbm)->bi_blocks)
return 0;
/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
rbm->offset += (sizeof(struct gfs2_rgrp) -
sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
- x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
- rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
- rbm->bi += x;
+ rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
return 0;
}
+/**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ * end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+ if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+ rbm->offset++;
+ return false;
+ }
+ if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+ return true;
+
+ rbm->offset = 0;
+ rbm->bii++;
+ return false;
+}
+
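This lets extent walkers advance one block at a time without round-tripping through gfs2_rbm_to_block() and gfs2_rbm_from_block(), as gfs2_unaligned_extlen did before this change. An illustrative loop, assuming an rbm already positioned inside the rgrp:

	/* Sketch: visit up to n consecutive blocks, stopping at rgrp end. */
	while (n--) {
		u8 state = gfs2_testbit(&rbm);	/* two-bit state of this block */
		/* ... act on state ... */
		if (gfs2_rbm_incr(&rbm))
			break;			/* walked off the end of the rgrp */
	}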
/**
* gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
* @rbm: Position to search (value/result)
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
- u64 block;
u32 n;
u8 res;
(*len)--;
if (*len == 0)
return true;
- block = gfs2_rbm_to_block(rbm);
- if (gfs2_rbm_from_block(rbm, block + 1))
+ if (gfs2_rbm_incr(rbm))
return true;
}
u32 chunk_size;
u8 *ptr, *start, *end;
u64 block;
+ struct gfs2_bitmap *bi;
if (n_unaligned &&
gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
n_unaligned = len & 3;
/* Start is now byte aligned */
while (len > 3) {
- start = rbm.bi->bi_bh->b_data;
- if (rbm.bi->bi_clone)
- start = rbm.bi->bi_clone;
- end = start + rbm.bi->bi_bh->b_size;
- start += rbm.bi->bi_offset;
+ bi = rbm_bi(&rbm);
+ start = bi->bi_bh->b_data;
+ if (bi->bi_clone)
+ start = bi->bi_clone;
+ end = start + bi->bi_bh->b_size;
+ start += bi->bi_offset;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
+ struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+ clear_bit(GBF_FULL, &bi->bi_flags);
smp_mb__after_clear_bit();
}
}
/**
* gfs2_rs_delete - delete a multi-block reservation
* @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
*
*/
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
- struct inode *inode = &ip->i_inode;
-
down_write(&ip->i_rw_mutex);
- if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+ if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
gfs2_rs_deltree(ip->i_res);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
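Passing NULL for wcount, as the evict and inode-creation failure paths elsewhere in this series do, forces the reservation to be freed unconditionally; gfs2_release() passes &inode->i_writecount so the reservation survives while other writers still hold the file open.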
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* header block */
} else if (x == 0) {
bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* last block */
} else if (x + 1 == length) {
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* other blocks */
} else {
bytes = sdp->sd_sb.sb_bsize -
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
}
bytes_left -= bytes;
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
*
*/
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
- unsigned requested)
+ const struct gfs2_alloc_parms *ap)
{
struct gfs2_rbm rbm = { .rgd = rgd, };
u64 goal;
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+ extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
}
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
const struct gfs2_inode *ip, bool nowrap)
{
struct buffer_head *bh;
- struct gfs2_bitmap *initial_bi;
+ int initial_bii;
u32 initial_offset;
u32 offset;
u8 *buffer;
- int index;
int n = 0;
int iters = rbm->rgd->rd_length;
int ret;
+ struct gfs2_bitmap *bi;
/* If we are not starting at the beginning of a bitmap, then we
* need to add one to the bitmap count to ensure that we search
iters++;
while(1) {
- if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+ bi = rbm_bi(rbm);
+ if (test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
- bh = rbm->bi->bi_bh;
- buffer = bh->b_data + rbm->bi->bi_offset;
+ bh = bi->bi_bh;
+ buffer = bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bh));
- if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
- buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+ if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+ buffer = bi->bi_clone + bi->bi_offset;
initial_offset = rbm->offset;
- offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+ offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
if (ip == NULL)
return 0;
- initial_bi = rbm->bi;
+ initial_bii = rbm->bii;
ret = gfs2_reservation_check_and_update(rbm, ip, minext);
if (ret == 0)
return 0;
if (ret > 0) {
- n += (rbm->bi - initial_bi);
+ n += (rbm->bii - initial_bii);
goto next_iter;
}
if (ret == -E2BIG) {
- index = 0;
+ rbm->bii = 0;
rbm->offset = 0;
- n += (rbm->bi - initial_bi);
+ n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
bitmap_full: /* Mark bitmap as full and fall through */
- if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
- set_bit(GBF_FULL, &rbm->bi->bi_flags);
+ if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ set_bit(GBF_FULL, &bi->bi_flags);
+ }
next_bitmap: /* Find next bitmap in the rgrp */
rbm->offset = 0;
- index = rbm->bi - rbm->rgd->rd_bits;
- index++;
- if (index == rbm->rgd->rd_length)
- index = 0;
+ rbm->bii++;
+ if (rbm->bii == rbm->rgd->rd_length)
+ rbm->bii = 0;
res_covered_end_of_rgrp:
- rbm->bi = &rbm->rgd->rd_bits[index];
- if ((index == 0) && nowrap)
+ if ((rbm->bii == 0) && nowrap)
break;
n++;
next_iter:
struct gfs2_inode *ip;
int error;
int found = 0;
- struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+ struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
while (1) {
down_write(&sdp->sd_log_flush_lock);
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
*
* Returns: errno
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
- if (gfs2_assert_warn(sdp, requested))
+ if (gfs2_assert_warn(sdp, ap->target))
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rbm.rgd;
- flags = 0; /* Yoda: Do or do not. There is no try */
} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
rs->rs_rbm.rgd = begin = ip->i_rgd;
} else {
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
- if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+ if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
skip = gfs2_orlov_skip(ip);
if (rs->rs_rbm.rgd == NULL)
return -EBADSLT;
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
- rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+ rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
/* Skip rgrps when we can't get a reservation on first pass */
if (!gfs2_rs_active(rs) && (loops < 1))
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+ if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
ip->i_rgd = rs->rs_rbm.rgd;
return 0;
}
*n = 1;
block = gfs2_rbm_to_block(rbm);
- gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+ gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
block++;
while (*n < elen) {
ret = gfs2_rbm_from_block(&pos, block);
if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
break;
- gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+ gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
(*n)++;
block++;
u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
+ struct gfs2_bitmap *bi;
rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rbm.rgd) {
while (blen--) {
gfs2_rbm_from_block(&rbm, bstart);
+ bi = rbm_bi(&rbm);
bstart++;
- if (!rbm.bi->bi_clone) {
- rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
- GFP_NOFS | __GFP_NOFAIL);
- memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
- rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
- rbm.bi->bi_len);
+ if (!bi->bi_clone) {
+ bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+ GFP_NOFS | __GFP_NOFAIL);
+ memcpy(bi->bi_clone + bi->bi_offset,
+ bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
}
- gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+ gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
gfs2_setbit(&rbm, false, new_state);
}
spin_unlock(&rgd->rd_rsspin);
}
+/**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+ const struct gfs2_inode *ip, bool dinode)
+{
+ u64 goal;
+
+ if (gfs2_rs_active(ip->i_res)) {
+ *rbm = ip->i_res->rs_rbm;
+ return;
+ }
+
+ if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+ goal = ip->i_goal;
+ else
+ goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+ gfs2_rbm_from_block(rbm, goal);
+}
+
/**
* gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
struct buffer_head *dibh;
struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
unsigned int ndata;
- u64 goal;
u64 block; /* block, within the file system scope */
int error;
- if (gfs2_rs_active(ip->i_res))
- goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
- else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
- goal = ip->i_goal;
- else
- goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
- gfs2_rbm_from_block(&rbm, goal);
+ gfs2_set_alloc_start(&rbm, ip, dinode);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
if (error == -ENOSPC) {
- gfs2_rbm_from_block(&rbm, goal);
+ gfs2_set_alloc_start(&rbm, ip, dinode);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
}
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
extern int gfs2_rs_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, NULL);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
&tune_attr_max_readahead.attr,
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
- &tune_attr_quota_simul_sync.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
return rv;
}
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value)
-{
- unsigned int c, o, b = bit;
- int old_value;
-
- c = b / (8 * PAGE_SIZE);
- b %= 8 * PAGE_SIZE;
- o = b / 8;
- b %= 8;
-
- old_value = (bitmap[c][o] & (1 << b));
- gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
- if (new_value)
- bitmap[c][o] |= 1 << b;
- else
- bitmap[c][o] &= ~(1 << b);
-}
-
#define gfs2_tune_get(sdp, field) \
gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value);
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
#endif /* __UTIL_DOT_H__ */
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
+ struct gfs2_alloc_parms ap = { .target = blks };
struct buffer_head *dibh;
int error;
if (error)
return error;
- error = gfs2_inplace_reserve(ip, blks, 0);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_gunlock_q;
extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+ return ((int)l->count < 0);
+}
+
#endif /* __LINUX_LOCKREF_H */
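Because lockref_mark_dead() stores -128 in the count, any negative count means the ref is dead, hence the signed comparison. The result is only stable while the lock is held; a hedged sketch of a lookup that honours that, with obj as a hypothetical object embedding a struct lockref:

	spin_lock(&obj->ref.lock);
	if (__lockref_is_dead(&obj->ref)) {
		spin_unlock(&obj->ref.lock);
		return NULL;		/* already being torn down */
	}
	obj->ref.count++;		/* safe: lock held and not dead */
	spin_unlock(&obj->ref.lock);
	return obj;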
assert_spin_locked(&lockref->lock);
lockref->count = -128;
}
+EXPORT_SYMBOL(lockref_mark_dead);
/**
* lockref_get_not_dead - Increments count unless the ref is dead