zsmalloc: use OBJ_TAG_BITS for bit shifter
author	Minchan Kim <minchan@kernel.org>
Tue, 26 Jul 2016 22:23:37 +0000 (15:23 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 23:19:19 +0000 (16:19 -0700)
Static checking warns about using a tag value as a bit shifter.  It doesn't
break the current behaviour, but it hurts readability.  Let's use OBJ_TAG_BITS
as the bit shifter instead of OBJ_ALLOCATED_TAG.

Link: http://lkml.kernel.org/r/20160607045146.GF26230@bbox
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
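
For reference, the two macros mentioned above are plain constants near the top
of mm/zsmalloc.c.  A minimal sketch of the relevant definitions (values as in
mainline around this change; shown only for context, not part of the patch):

	/* number of low bits in link->next reserved for tags */
	#define OBJ_TAG_BITS		1
	/* tag value stored in those low bits for an allocated object */
	#define OBJ_ALLOCATED_TAG	1

	struct link_free {
		union {
			/*
			 * Free object index: the index of the next free
			 * object, shifted left by OBJ_TAG_BITS so the tag
			 * bits stay clear.
			 */
			unsigned long next;
			/* handle of an allocated object */
			unsigned long handle;
		};
	};

Since both constants happen to equal 1 today, shifting by OBJ_ALLOCATED_TAG and
by OBJ_TAG_BITS produce the same value; the change below is about using the bit
width, not the flag value, as the shift count.
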
mm/zsmalloc.c

index 04a4f063b4fd70c5ff960686d026dcb705805283..6b6986a02aa01c1e967a10cc2449be15752285d5 100644 (file)
@@ -1052,7 +1052,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
                link = (struct link_free *)vaddr + off / sizeof(*link);
 
                while ((off += class->size) < PAGE_SIZE) {
-                       link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+                       link->next = freeobj++ << OBJ_TAG_BITS;
                        link += class->size / sizeof(*link);
                }
 
@@ -1063,13 +1063,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
                 */
                next_page = get_next_page(page);
                if (next_page) {
-                       link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+                       link->next = freeobj++ << OBJ_TAG_BITS;
                } else {
                        /*
-                        * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+                        * Reset OBJ_TAG_BITS bit to last link to tell
                         * whether it's allocated object or not.
                         */
-                       link->next = -1 << OBJ_ALLOCATED_TAG;
+                       link->next = -1 << OBJ_TAG_BITS;
                }
                kunmap_atomic(vaddr);
                page = next_page;
@@ -1514,7 +1514,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
        vaddr = kmap_atomic(m_page);
        link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-       set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+       set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
        if (likely(!PageHugeObject(m_page)))
                /* record handle in the header of allocated chunk */
                link->handle = handle;
@@ -1616,7 +1616,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 
        /* Insert this object in containing zspage's freelist */
        link = (struct link_free *)(vaddr + f_offset);
-       link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
+       link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
        kunmap_atomic(vaddr);
        set_freeobj(zspage, f_objidx);
        mod_zspage_inuse(zspage, -1);
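
To illustrate the convention the patch converges on, here is a small,
self-contained userspace sketch (hypothetical helper names, not kernel code):
the free-object index is stored above the low OBJ_TAG_BITS tag bits, so it is
packed and unpacked by shifting by the bit width, while OBJ_ALLOCATED_TAG is
only ever used as a flag value.

	#include <stdio.h>

	#define OBJ_TAG_BITS      1
	#define OBJ_ALLOCATED_TAG 1

	/* pack a free-object index above the tag bits */
	static unsigned long pack_freeobj(unsigned long freeobj)
	{
		return freeobj << OBJ_TAG_BITS;	/* shift by the bit width */
	}

	/* recover the free-object index */
	static unsigned long unpack_freeobj(unsigned long next)
	{
		return next >> OBJ_TAG_BITS;
	}

	/* the tag itself is tested as a flag, never used as a shift count */
	static int is_allocated(unsigned long word)
	{
		return word & OBJ_ALLOCATED_TAG;
	}

	int main(void)
	{
		unsigned long next = pack_freeobj(42);

		printf("packed=%lu index=%lu allocated=%d\n",
		       next, unpack_freeobj(next), is_allocated(next));
		return 0;
	}

Packing index 42 yields 84 with the tag bit clear, and unpacking restores 42,
mirroring how the hunks above build the freelist with freeobj << OBJ_TAG_BITS.
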