git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - fs/xfs/xfs_buf.c
xfs: remove buffers from the delwri list in xfs_buf_stale
[karo-tx-linux.git] / fs / xfs / xfs_buf.c
index d3c2b58d7d702523445bf06a5e76aa12d11f2db6..3df7d0a2b245f2f5483529ce38d0503c628b74ea 100644 (file)
@@ -151,6 +151,7 @@ xfs_buf_stale(
        struct xfs_buf  *bp)
 {
        bp->b_flags |= XBF_STALE;
+       xfs_buf_delwri_dequeue(bp);
        atomic_set(&(bp)->b_lru_ref, 0);
        if (!list_empty(&bp->b_lru)) {
                struct xfs_buftarg *btp = bp->b_target;
@@ -415,10 +416,7 @@ _xfs_buf_map_pages(
 /*
  *     Look up, and creates if absent, a lockable buffer for
  *     a given range of an inode.  The buffer is returned
- *     locked.  If other overlapping buffers exist, they are
- *     released before the new buffer is created and locked,
- *     which may imply that this call will block until those buffers
- *     are unlocked.  No I/O is implied by this call.
+ *     locked. No I/O is implied by this call.
  */
 xfs_buf_t *
 _xfs_buf_find(
@@ -480,8 +478,6 @@ _xfs_buf_find(
 
        /* No match found */
        if (new_bp) {
-               _xfs_buf_initialize(new_bp, btp, range_base,
-                               range_length, flags);
                rb_link_node(&new_bp->b_rbnode, parent, rbp);
                rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
                /* the buffer keeps the perag reference until it is freed */
@@ -524,35 +520,53 @@ found:
 }
 
 /*
- *     Assembles a buffer covering the specified range.
- *     Storage in memory for all portions of the buffer will be allocated,
- *     although backing storage may not be.
+ * Assembles a buffer covering the specified range. The code is optimised for
+ * cache hits, as metadata intensive workloads will see 3 orders of magnitude
+ * more hits than misses.
  */
-xfs_buf_t *
+struct xfs_buf *
 xfs_buf_get(
        xfs_buftarg_t           *target,/* target for buffer            */
        xfs_off_t               ioff,   /* starting offset of range     */
        size_t                  isize,  /* length of range              */
        xfs_buf_flags_t         flags)
 {
-       xfs_buf_t               *bp, *new_bp;
+       struct xfs_buf          *bp;
+       struct xfs_buf          *new_bp;
        int                     error = 0;
 
+       bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
+       if (likely(bp))
+               goto found;
+
        new_bp = xfs_buf_allocate(flags);
        if (unlikely(!new_bp))
                return NULL;
 
+       _xfs_buf_initialize(new_bp, target,
+                           ioff << BBSHIFT, isize << BBSHIFT, flags);
+
        bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+       if (!bp) {
+               xfs_buf_deallocate(new_bp);
+               return NULL;
+       }
+
        if (bp == new_bp) {
                error = xfs_buf_allocate_memory(bp, flags);
                if (error)
                        goto no_buffer;
-       } else {
+       } else
                xfs_buf_deallocate(new_bp);
-               if (unlikely(bp == NULL))
-                       return NULL;
-       }
 
+       /*
+        * Now we have a workable buffer, fill in the block number so
+        * that we can do IO on it.
+        */
+       bp->b_bn = ioff;
+       bp->b_count_desired = bp->b_buffer_length;
+
+found:
        if (!(bp->b_flags & XBF_MAPPED)) {
                error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
@@ -563,18 +577,10 @@ xfs_buf_get(
        }
 
        XFS_STATS_INC(xb_get);
-
-       /*
-        * Always fill in the block number now, the mapped cases can do
-        * their own overlay of this later.
-        */
-       bp->b_bn = ioff;
-       bp->b_count_desired = bp->b_buffer_length;
-
        trace_xfs_buf_get(bp, flags, _RET_IP_);
        return bp;
 
- no_buffer:
+no_buffer:
        if (flags & (XBF_LOCK | XBF_TRYLOCK))
                xfs_buf_unlock(bp);
        xfs_buf_rele(bp);
@@ -1054,9 +1060,8 @@ xfs_bioerror(
         * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
         */
        XFS_BUF_UNREAD(bp);
-       xfs_buf_delwri_dequeue(bp);
        XFS_BUF_UNDONE(bp);
-       XFS_BUF_STALE(bp);
+       xfs_buf_stale(bp);
 
        xfs_buf_ioend(bp, 0);
 
@@ -1083,9 +1088,8 @@ xfs_bioerror_relse(
         * change that interface.
         */
        XFS_BUF_UNREAD(bp);
-       xfs_buf_delwri_dequeue(bp);
        XFS_BUF_DONE(bp);
-       XFS_BUF_STALE(bp);
+       xfs_buf_stale(bp);
        bp->b_iodone = NULL;
        if (!(fl & XBF_ASYNC)) {
                /*
@@ -1095,7 +1099,7 @@ xfs_bioerror_relse(
                 * ASYNC buffers.
                 */
                xfs_buf_ioerror(bp, EIO);
-               XFS_BUF_FINISH_IOWAIT(bp);
+               complete(&bp->b_iowait);
        } else {
                xfs_buf_relse(bp);
        }
@@ -1830,11 +1834,3 @@ xfs_buf_terminate(void)
        destroy_workqueue(xfslogd_workqueue);
        kmem_zone_destroy(xfs_buf_zone);
 }
-
-#ifdef CONFIG_KDB_MODULES
-struct list_head *
-xfs_get_buftarg_list(void)
-{
-       return &xfs_buftarg_list;
-}
-#endif