1 /*
2  * linux/fs/ext2/xattr.c
3  *
4  * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
5  *
6  * Fix by Harrison Xing <harrison@mountainviewdata.com>.
7  * Extended attributes for symlinks and special files added per
8  *  suggestion of Luka Renko <luka.renko@hermes.si>.
9  * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
10  *  Red Hat Inc.
11  *
12  */
13
14 /*
15  * Extended attributes are stored on disk blocks allocated outside of
16  * any inode. The i_file_acl field is then made to point to this allocated
17  * block. If two inodes have identical sets of extended attributes, these
18  * inodes may share the same extended attribute block. Such situations
19  * are automatically detected by keeping a cache of recent attribute block
20  * numbers and hashes over the block's contents in memory.
21  *
22  *
23  * Extended attribute block layout:
24  *
25  *   +------------------+
26  *   | header           |
27  *   | entry 1          | |
28  *   | entry 2          | | growing downwards
29  *   | entry 3          | v
30  *   | four null bytes  |
31  *   | . . .            |
32  *   | value 1          | ^
33  *   | value 3          | | growing upwards
34  *   | value 2          | |
35  *   +------------------+
36  *
37  * The block header is followed by multiple entry descriptors. These entry
38  * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
39  * byte boundaries. The entry descriptors are sorted by attribute name,
40  * so that two extended attribute blocks can be compared efficiently.
41  *
42  * Attribute values are aligned to the end of the block, stored in
43  * no specific order. They are also padded to EXT2_XATTR_PAD byte
44  * boundaries. No additional gaps are left between them.
45  *
46  * Locking strategy
47  * ----------------
48  * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
49  * EA blocks are only changed if they are exclusive to an inode, so
50  * holding xattr_sem also means that nothing but the EA block's reference
51  * count will change. Multiple writers to an EA block are synchronized
52  * by the bh lock. No more than a single bh lock is held at any time
53  * to avoid deadlocks.
54  */
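/*
 * For reference, the on-disk structures behind this layout are declared in
 * xattr.h; the sketch below paraphrases them (the field annotations are
 * ours, see xattr.h for the authoritative definitions):
 *
 *      struct ext2_xattr_header {
 *              __le32  h_magic;        identifies an EA block
 *              __le32  h_refcount;     number of inodes sharing the block
 *              __le32  h_blocks;       always 1 in this implementation
 *              __le32  h_hash;         hash of all entries, 0 means never share
 *              __u32   h_reserved[4];
 *      };
 *
 *      struct ext2_xattr_entry {
 *              __u8    e_name_len;
 *              __u8    e_name_index;   EXT2_XATTR_INDEX_* namespace
 *              __le16  e_value_offs;   offset of the value within the block
 *              __le32  e_value_block;  unused, must be zero
 *              __le32  e_value_size;
 *              __le32  e_hash;         hash of name and value
 *              char    e_name[0];      name, not null-terminated
 *      };
 */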
55
56 #include <linux/buffer_head.h>
57 #include <linux/init.h>
58 #include <linux/slab.h>
59 #include <linux/mbcache.h>
60 #include <linux/quotaops.h>
61 #include <linux/rwsem.h>
62 #include <linux/security.h>
63 #include "ext2.h"
64 #include "xattr.h"
65 #include "acl.h"
66
67 #define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
68 #define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
69 #define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
70 #define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
71
72 #ifdef EXT2_XATTR_DEBUG
73 # define ea_idebug(inode, f...) do { \
74                 printk(KERN_DEBUG "inode %s:%ld: ", \
75                         inode->i_sb->s_id, inode->i_ino); \
76                 printk(f); \
77                 printk("\n"); \
78         } while (0)
79 # define ea_bdebug(bh, f...) do { \
80                 char b[BDEVNAME_SIZE]; \
81                 printk(KERN_DEBUG "block %s:%lu: ", \
82                         bdevname(bh->b_bdev, b), \
83                         (unsigned long) bh->b_blocknr); \
84                 printk(f); \
85                 printk("\n"); \
86         } while (0)
87 #else
88 # define ea_idebug(f...)
89 # define ea_bdebug(f...)
90 #endif
91
92 static int ext2_xattr_set2(struct inode *, struct buffer_head *,
93                            struct ext2_xattr_header *);
94
95 static int ext2_xattr_cache_insert(struct buffer_head *);
96 static struct buffer_head *ext2_xattr_cache_find(struct inode *,
97                                                  struct ext2_xattr_header *);
98 static void ext2_xattr_rehash(struct ext2_xattr_header *,
99                               struct ext2_xattr_entry *);
100
101 static struct mb_cache *ext2_xattr_cache;
102
103 static const struct xattr_handler *ext2_xattr_handler_map[] = {
104         [EXT2_XATTR_INDEX_USER]              = &ext2_xattr_user_handler,
105 #ifdef CONFIG_EXT2_FS_POSIX_ACL
106         [EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext2_xattr_acl_access_handler,
107         [EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
108 #endif
109         [EXT2_XATTR_INDEX_TRUSTED]           = &ext2_xattr_trusted_handler,
110 #ifdef CONFIG_EXT2_FS_SECURITY
111         [EXT2_XATTR_INDEX_SECURITY]          = &ext2_xattr_security_handler,
112 #endif
113 };
114
115 const struct xattr_handler *ext2_xattr_handlers[] = {
116         &ext2_xattr_user_handler,
117         &ext2_xattr_trusted_handler,
118 #ifdef CONFIG_EXT2_FS_POSIX_ACL
119         &ext2_xattr_acl_access_handler,
120         &ext2_xattr_acl_default_handler,
121 #endif
122 #ifdef CONFIG_EXT2_FS_SECURITY
123         &ext2_xattr_security_handler,
124 #endif
125         NULL
126 };
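/*
 * ext2_xattr_handlers is the table walked by the generic VFS xattr code
 * (it is installed as sb->s_xattr at mount time), while the
 * ext2_xattr_handler_map above translates the on-disk e_name_index of an
 * entry back into its handler, as done by ext2_xattr_handler() below.
 */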
127
128 static inline const struct xattr_handler *
129 ext2_xattr_handler(int name_index)
130 {
131         const struct xattr_handler *handler = NULL;
132
133         if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
134                 handler = ext2_xattr_handler_map[name_index];
135         return handler;
136 }
137
138 /*
139  * ext2_xattr_get()
140  *
141  * Copy the value of an extended attribute into the buffer provided, or,
142  * if the buffer is NULL, only compute the size of the buffer that would
143  * be required.
144  *
145  * Returns a negative error number on failure, or the number of bytes
146  * used / required on success.
147  */
148 int
149 ext2_xattr_get(struct inode *inode, int name_index, const char *name,
150                void *buffer, size_t buffer_size)
151 {
152         struct buffer_head *bh = NULL;
153         struct ext2_xattr_entry *entry;
154         size_t name_len, size;
155         char *end;
156         int error;
157
158         ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
159                   name_index, name, buffer, (long)buffer_size);
160
161         if (name == NULL)
162                 return -EINVAL;
163         name_len = strlen(name);
164         if (name_len > 255)
165                 return -ERANGE;
166
167         down_read(&EXT2_I(inode)->xattr_sem);
168         error = -ENODATA;
169         if (!EXT2_I(inode)->i_file_acl)
170                 goto cleanup;
171         ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
172         bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
173         error = -EIO;
174         if (!bh)
175                 goto cleanup;
176         ea_bdebug(bh, "b_count=%d, refcount=%d",
177                 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
178         end = bh->b_data + bh->b_size;
179         if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
180             HDR(bh)->h_blocks != cpu_to_le32(1)) {
181 bad_block:      ext2_error(inode->i_sb, "ext2_xattr_get",
182                         "inode %ld: bad block %d", inode->i_ino,
183                         EXT2_I(inode)->i_file_acl);
184                 error = -EIO;
185                 goto cleanup;
186         }
187
188         /* find named attribute */
189         entry = FIRST_ENTRY(bh);
190         while (!IS_LAST_ENTRY(entry)) {
191                 struct ext2_xattr_entry *next =
192                         EXT2_XATTR_NEXT(entry);
193                 if ((char *)next >= end)
194                         goto bad_block;
195                 if (name_index == entry->e_name_index &&
196                     name_len == entry->e_name_len &&
197                     memcmp(name, entry->e_name, name_len) == 0)
198                         goto found;
199                 entry = next;
200         }
201         if (ext2_xattr_cache_insert(bh))
202                 ea_idebug(inode, "cache insert failed");
203         error = -ENODATA;
204         goto cleanup;
205 found:
206         /* check the buffer size */
207         if (entry->e_value_block != 0)
208                 goto bad_block;
209         size = le32_to_cpu(entry->e_value_size);
210         if (size > inode->i_sb->s_blocksize ||
211             le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
212                 goto bad_block;
213
214         if (ext2_xattr_cache_insert(bh))
215                 ea_idebug(inode, "cache insert failed");
216         if (buffer) {
217                 error = -ERANGE;
218                 if (size > buffer_size)
219                         goto cleanup;
220                 /* return value of attribute */
221                 memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
222                         size);
223         }
224         error = size;
225
226 cleanup:
227         brelse(bh);
228         up_read(&EXT2_I(inode)->xattr_sem);
229
230         return error;
231 }
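/*
 * Example (illustrative sketch only, not used in this file): the usual
 * two-pass calling pattern for ext2_xattr_get().  The attribute name
 * "foo" and the GFP_KERNEL allocation are arbitrary choices here.
 *
 *      int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *                                NULL, 0);
 *      if (size >= 0) {
 *              char *buf = kmalloc(size, GFP_KERNEL);
 *
 *              if (buf) {
 *                      size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *                                            "foo", buf, size);
 *                      kfree(buf);
 *              }
 *      }
 */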
232
233 /*
234  * ext2_xattr_list()
235  *
236  * Copy the list of attribute names into the buffer provided, or, if the
237  * buffer is NULL, only compute the size of the buffer that would be
238  * required.
239  *
240  * Returns a negative error number on failure, or the number of bytes
241  * used / required on success.
242  */
243 static int
244 ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
245 {
246         struct inode *inode = dentry->d_inode;
247         struct buffer_head *bh = NULL;
248         struct ext2_xattr_entry *entry;
249         char *end;
250         size_t rest = buffer_size;
251         int error;
252
253         ea_idebug(inode, "buffer=%p, buffer_size=%ld",
254                   buffer, (long)buffer_size);
255
256         down_read(&EXT2_I(inode)->xattr_sem);
257         error = 0;
258         if (!EXT2_I(inode)->i_file_acl)
259                 goto cleanup;
260         ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
261         bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
262         error = -EIO;
263         if (!bh)
264                 goto cleanup;
265         ea_bdebug(bh, "b_count=%d, refcount=%d",
266                 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
267         end = bh->b_data + bh->b_size;
268         if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
269             HDR(bh)->h_blocks != cpu_to_le32(1)) {
270 bad_block:      ext2_error(inode->i_sb, "ext2_xattr_list",
271                         "inode %ld: bad block %d", inode->i_ino,
272                         EXT2_I(inode)->i_file_acl);
273                 error = -EIO;
274                 goto cleanup;
275         }
276
277         /* check the on-disk data structure */
278         entry = FIRST_ENTRY(bh);
279         while (!IS_LAST_ENTRY(entry)) {
280                 struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
281
282                 if ((char *)next >= end)
283                         goto bad_block;
284                 entry = next;
285         }
286         if (ext2_xattr_cache_insert(bh))
287                 ea_idebug(inode, "cache insert failed");
288
289         /* list the attribute names */
290         for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
291              entry = EXT2_XATTR_NEXT(entry)) {
292                 const struct xattr_handler *handler =
293                         ext2_xattr_handler(entry->e_name_index);
294
295                 if (handler) {
296                         size_t size = handler->list(dentry, buffer, rest,
297                                                     entry->e_name,
298                                                     entry->e_name_len,
299                                                     handler->flags);
300                         if (buffer) {
301                                 if (size > rest) {
302                                         error = -ERANGE;
303                                         goto cleanup;
304                                 }
305                                 buffer += size;
306                         }
307                         rest -= size;
308                 }
309         }
310         error = buffer_size - rest;  /* total size */
311
312 cleanup:
313         brelse(bh);
314         up_read(&EXT2_I(inode)->xattr_sem);
315
316         return error;
317 }
318
319 /*
320  * Inode operation listxattr()
321  *
322  * dentry->d_inode->i_mutex: don't care
323  */
324 ssize_t
325 ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
326 {
327         return ext2_xattr_list(dentry, buffer, size);
328 }
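/*
 * Example (sketch): the buffer filled in by ext2_listxattr() holds the
 * attribute names as consecutive null-terminated strings, so a caller can
 * walk it as below; "buf" and "len" are assumed to come from a successful
 * call.
 *
 *      char *name = buf;
 *
 *      while (name < buf + len) {
 *              printk(KERN_DEBUG "xattr name: %s\n", name);
 *              name += strlen(name) + 1;
 *      }
 */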
329
330 /*
331  * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
332  * not set, set it.
333  */
334 static void ext2_xattr_update_super_block(struct super_block *sb)
335 {
336         if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
337                 return;
338
339         spin_lock(&EXT2_SB(sb)->s_lock);
340         EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
341         spin_unlock(&EXT2_SB(sb)->s_lock);
342         sb->s_dirt = 1;
343         mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
344 }
345
346 /*
347  * ext2_xattr_set()
348  *
349  * Create, replace or remove an extended attribute for this inode.  Value
350  * is NULL to remove an existing extended attribute, and non-NULL to
351  * either replace an existing extended attribute, or create a new extended
352  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
353  * specify that an extended attribute must already exist or must not yet
354  * exist before the call, respectively.
355  *
356  * Returns 0, or a negative error number on failure.
357  */
358 int
359 ext2_xattr_set(struct inode *inode, int name_index, const char *name,
360                const void *value, size_t value_len, int flags)
361 {
362         struct super_block *sb = inode->i_sb;
363         struct buffer_head *bh = NULL;
364         struct ext2_xattr_header *header = NULL;
365         struct ext2_xattr_entry *here, *last;
366         size_t name_len, free, min_offs = sb->s_blocksize;
367         int not_found = 1, error;
368         char *end;
369         
370         /*
371          * header -- Points either into bh, or to a temporarily
372          *           allocated buffer.
373          * here -- The named entry found, or the place for inserting, within
374          *         the block pointed to by header.
375          * last -- Points right after the last named entry within the block
376          *         pointed to by header.
377          * min_offs -- The offset of the first value (values are aligned
378          *             towards the end of the block).
379          * end -- Points right after the block pointed to by header.
380          */
381         
382         ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
383                   name_index, name, value, (long)value_len);
384
385         if (value == NULL)
386                 value_len = 0;
387         if (name == NULL)
388                 return -EINVAL;
389         name_len = strlen(name);
390         if (name_len > 255 || value_len > sb->s_blocksize)
391                 return -ERANGE;
392         down_write(&EXT2_I(inode)->xattr_sem);
393         if (EXT2_I(inode)->i_file_acl) {
394                 /* The inode already has an extended attribute block. */
395                 bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
396                 error = -EIO;
397                 if (!bh)
398                         goto cleanup;
399                 ea_bdebug(bh, "b_count=%d, refcount=%d",
400                         atomic_read(&(bh->b_count)),
401                         le32_to_cpu(HDR(bh)->h_refcount));
402                 header = HDR(bh);
403                 end = bh->b_data + bh->b_size;
404                 if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
405                     header->h_blocks != cpu_to_le32(1)) {
406 bad_block:              ext2_error(sb, "ext2_xattr_set",
407                                 "inode %ld: bad block %d", inode->i_ino, 
408                                    EXT2_I(inode)->i_file_acl);
409                         error = -EIO;
410                         goto cleanup;
411                 }
412                 /* Find the named attribute. */
413                 here = FIRST_ENTRY(bh);
414                 while (!IS_LAST_ENTRY(here)) {
415                         struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
416                         if ((char *)next >= end)
417                                 goto bad_block;
418                         if (!here->e_value_block && here->e_value_size) {
419                                 size_t offs = le16_to_cpu(here->e_value_offs);
420                                 if (offs < min_offs)
421                                         min_offs = offs;
422                         }
423                         not_found = name_index - here->e_name_index;
424                         if (!not_found)
425                                 not_found = name_len - here->e_name_len;
426                         if (!not_found)
427                                 not_found = memcmp(name, here->e_name,name_len);
428                         if (not_found <= 0)
429                                 break;
430                         here = next;
431                 }
432                 last = here;
433                 /* We still need to compute min_offs and last. */
434                 while (!IS_LAST_ENTRY(last)) {
435                         struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
436                         if ((char *)next >= end)
437                                 goto bad_block;
438                         if (!last->e_value_block && last->e_value_size) {
439                                 size_t offs = le16_to_cpu(last->e_value_offs);
440                                 if (offs < min_offs)
441                                         min_offs = offs;
442                         }
443                         last = next;
444                 }
445
446                 /* Check whether we have enough space left. */
447                 free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
448         } else {
449                 /* We will use a new extended attribute block. */
450                 free = sb->s_blocksize -
451                         sizeof(struct ext2_xattr_header) - sizeof(__u32);
452                 here = last = NULL;  /* avoid gcc uninitialized warning. */
453         }
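        /*
         * Worked example (illustrative numbers): for a new 1024-byte block,
         * free == 1024 - sizeof(struct ext2_xattr_header) - sizeof(__u32)
         * == 1024 - 32 - 4 == 988 bytes, the final 4 bytes being reserved
         * for the terminating null entry.
         */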
454
455         if (not_found) {
456                 /* Request to remove a nonexistent attribute? */
457                 error = -ENODATA;
458                 if (flags & XATTR_REPLACE)
459                         goto cleanup;
460                 error = 0;
461                 if (value == NULL)
462                         goto cleanup;
463         } else {
464                 /* Request to create an existing attribute? */
465                 error = -EEXIST;
466                 if (flags & XATTR_CREATE)
467                         goto cleanup;
468                 if (!here->e_value_block && here->e_value_size) {
469                         size_t size = le32_to_cpu(here->e_value_size);
470
471                         if (le16_to_cpu(here->e_value_offs) + size > 
472                             sb->s_blocksize || size > sb->s_blocksize)
473                                 goto bad_block;
474                         free += EXT2_XATTR_SIZE(size);
475                 }
476                 free += EXT2_XATTR_LEN(name_len);
477         }
478         error = -ENOSPC;
479         if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
480                 goto cleanup;
481
482         /* Here we know that we can set the new attribute. */
483
484         if (header) {
485                 struct mb_cache_entry *ce;
486
487                 /* assert(header == HDR(bh)); */
488                 ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
489                                         bh->b_blocknr);
490                 lock_buffer(bh);
491                 if (header->h_refcount == cpu_to_le32(1)) {
492                         ea_bdebug(bh, "modifying in-place");
493                         if (ce)
494                                 mb_cache_entry_free(ce);
495                         /* keep the buffer locked while modifying it. */
496                 } else {
497                         int offset;
498
499                         if (ce)
500                                 mb_cache_entry_release(ce);
501                         unlock_buffer(bh);
502                         ea_bdebug(bh, "cloning");
503                         header = kmalloc(bh->b_size, GFP_KERNEL);
504                         error = -ENOMEM;
505                         if (header == NULL)
506                                 goto cleanup;
507                         memcpy(header, HDR(bh), bh->b_size);
508                         header->h_refcount = cpu_to_le32(1);
509
510                         offset = (char *)here - bh->b_data;
511                         here = ENTRY((char *)header + offset);
512                         offset = (char *)last - bh->b_data;
513                         last = ENTRY((char *)header + offset);
514                 }
515         } else {
516                 /* Allocate a buffer where we construct the new block. */
517                 header = kzalloc(sb->s_blocksize, GFP_KERNEL);
518                 error = -ENOMEM;
519                 if (header == NULL)
520                         goto cleanup;
521                 end = (char *)header + sb->s_blocksize;
522                 header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
523                 header->h_blocks = header->h_refcount = cpu_to_le32(1);
524                 last = here = ENTRY(header+1);
525         }
526
527         /* Iff we are modifying the block in-place, bh is locked here. */
528
529         if (not_found) {
530                 /* Insert the new name. */
531                 size_t size = EXT2_XATTR_LEN(name_len);
532                 size_t rest = (char *)last - (char *)here;
533                 memmove((char *)here + size, here, rest);
534                 memset(here, 0, size);
535                 here->e_name_index = name_index;
536                 here->e_name_len = name_len;
537                 memcpy(here->e_name, name, name_len);
538         } else {
539                 if (!here->e_value_block && here->e_value_size) {
540                         char *first_val = (char *)header + min_offs;
541                         size_t offs = le16_to_cpu(here->e_value_offs);
542                         char *val = (char *)header + offs;
543                         size_t size = EXT2_XATTR_SIZE(
544                                 le32_to_cpu(here->e_value_size));
545
546                         if (size == EXT2_XATTR_SIZE(value_len)) {
547                                 /* The old and the new value have the same
548                                    size. Just replace. */
549                                 here->e_value_size = cpu_to_le32(value_len);
550                                 memset(val + size - EXT2_XATTR_PAD, 0,
551                                        EXT2_XATTR_PAD); /* Clear pad bytes. */
552                                 memcpy(val, value, value_len);
553                                 goto skip_replace;
554                         }
555
556                         /* Remove the old value. */
557                         memmove(first_val + size, first_val, val - first_val);
558                         memset(first_val, 0, size);
559                         here->e_value_offs = 0;
560                         min_offs += size;
561
562                         /* Adjust all value offsets. */
563                         last = ENTRY(header+1);
564                         while (!IS_LAST_ENTRY(last)) {
565                                 size_t o = le16_to_cpu(last->e_value_offs);
566                                 if (!last->e_value_block && o < offs)
567                                         last->e_value_offs =
568                                                 cpu_to_le16(o + size);
569                                 last = EXT2_XATTR_NEXT(last);
570                         }
571                 }
572                 if (value == NULL) {
573                         /* Remove the old name. */
574                         size_t size = EXT2_XATTR_LEN(name_len);
575                         last = ENTRY((char *)last - size);
576                         memmove(here, (char*)here + size,
577                                 (char*)last - (char*)here);
578                         memset(last, 0, size);
579                 }
580         }
581
582         if (value != NULL) {
583                 /* Insert the new value. */
584                 here->e_value_size = cpu_to_le32(value_len);
585                 if (value_len) {
586                         size_t size = EXT2_XATTR_SIZE(value_len);
587                         char *val = (char *)header + min_offs - size;
588                         here->e_value_offs =
589                                 cpu_to_le16((char *)val - (char *)header);
590                         memset(val + size - EXT2_XATTR_PAD, 0,
591                                EXT2_XATTR_PAD); /* Clear the pad bytes. */
592                         memcpy(val, value, value_len);
593                 }
594         }
595
596 skip_replace:
597         if (IS_LAST_ENTRY(ENTRY(header+1))) {
598                 /* This block is now empty. */
599                 if (bh && header == HDR(bh))
600                         unlock_buffer(bh);  /* we were modifying in-place. */
601                 error = ext2_xattr_set2(inode, bh, NULL);
602         } else {
603                 ext2_xattr_rehash(header, here);
604                 if (bh && header == HDR(bh))
605                         unlock_buffer(bh);  /* we were modifying in-place. */
606                 error = ext2_xattr_set2(inode, bh, header);
607         }
608
609 cleanup:
610         brelse(bh);
611         if (!(bh && header == HDR(bh)))
612                 kfree(header);
613         up_write(&EXT2_I(inode)->xattr_sem);
614
615         return error;
616 }
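/*
 * Example (sketch only): the three kinds of requests ext2_xattr_set()
 * handles, shown for an arbitrary attribute in the "user" namespace;
 * "value" and "len" are assumed to be supplied by the caller.
 *
 *      (create, fails with -EEXIST if "foo" already exists)
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", value, len,
 *                     XATTR_CREATE);
 *
 *      (replace, fails with -ENODATA if "foo" does not exist)
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", value, len,
 *                     XATTR_REPLACE);
 *
 *      (remove, a NULL value with flags == 0 succeeds even if "foo" is absent)
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0, 0);
 */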
617
618 /*
619  * Second half of ext2_xattr_set(): Update the file system.
620  */
621 static int
622 ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
623                 struct ext2_xattr_header *header)
624 {
625         struct super_block *sb = inode->i_sb;
626         struct buffer_head *new_bh = NULL;
627         int error;
628
629         if (header) {
630                 new_bh = ext2_xattr_cache_find(inode, header);
631                 if (new_bh) {
632                         /* We found an identical block in the cache. */
633                         if (new_bh == old_bh) {
634                                 ea_bdebug(new_bh, "keeping this block");
635                         } else {
636                                 /* The old block is released after updating
637                                    the inode.  */
638                                 ea_bdebug(new_bh, "reusing block");
639
640                                 error = dquot_alloc_block(inode, 1);
641                                 if (error) {
642                                         unlock_buffer(new_bh);
643                                         goto cleanup;
644                                 }
645                                 le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
646                                 ea_bdebug(new_bh, "refcount now=%d",
647                                         le32_to_cpu(HDR(new_bh)->h_refcount));
648                         }
649                         unlock_buffer(new_bh);
650                 } else if (old_bh && header == HDR(old_bh)) {
651                         /* Keep this block. No need to lock the block as we
652                            don't need to change the reference count. */
653                         new_bh = old_bh;
654                         get_bh(new_bh);
655                         ext2_xattr_cache_insert(new_bh);
656                 } else {
657                         /* We need to allocate a new block */
658                         ext2_fsblk_t goal = ext2_group_first_block_no(sb,
659                                                 EXT2_I(inode)->i_block_group);
660                         int block = ext2_new_block(inode, goal, &error);
661                         if (error)
662                                 goto cleanup;
663                         ea_idebug(inode, "creating block %d", block);
664
665                         new_bh = sb_getblk(sb, block);
666                         if (!new_bh) {
667                                 ext2_free_blocks(inode, block, 1);
668                                 mark_inode_dirty(inode);
669                                 error = -EIO;
670                                 goto cleanup;
671                         }
672                         lock_buffer(new_bh);
673                         memcpy(new_bh->b_data, header, new_bh->b_size);
674                         set_buffer_uptodate(new_bh);
675                         unlock_buffer(new_bh);
676                         ext2_xattr_cache_insert(new_bh);
677                         
678                         ext2_xattr_update_super_block(sb);
679                 }
680                 mark_buffer_dirty(new_bh);
681                 if (IS_SYNC(inode)) {
682                         sync_dirty_buffer(new_bh);
683                         error = -EIO;
684                         if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
685                                 goto cleanup;
686                 }
687         }
688
689         /* Update the inode. */
690         EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
691         inode->i_ctime = CURRENT_TIME_SEC;
692         if (IS_SYNC(inode)) {
693                 error = sync_inode_metadata(inode, 1);
694                 /* In case sync failed due to ENOSPC the inode was actually
695                  * written (only some dirty data were not) so we just proceed
696                  * as if nothing happened and clean up the unused block */
697                 if (error && error != -ENOSPC) {
698                         if (new_bh && new_bh != old_bh) {
699                                 dquot_free_block_nodirty(inode, 1);
700                                 mark_inode_dirty(inode);
701                         }
702                         goto cleanup;
703                 }
704         } else
705                 mark_inode_dirty(inode);
706
707         error = 0;
708         if (old_bh && old_bh != new_bh) {
709                 struct mb_cache_entry *ce;
710
711                 /*
712                  * If there was an old block and we are no longer using it,
713                  * release the old block.
714                  */
715                 ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
716                                         old_bh->b_blocknr);
717                 lock_buffer(old_bh);
718                 if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
719                         /* Free the old block. */
720                         if (ce)
721                                 mb_cache_entry_free(ce);
722                         ea_bdebug(old_bh, "freeing");
723                         ext2_free_blocks(inode, old_bh->b_blocknr, 1);
724                         mark_inode_dirty(inode);
725                         /* We let our caller release old_bh, so we
726                          * need an extra reference on it beforehand. */
727                         get_bh(old_bh);
728                         bforget(old_bh);
729                 } else {
730                         /* Decrement the refcount only. */
731                         le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
732                         if (ce)
733                                 mb_cache_entry_release(ce);
734                         dquot_free_block_nodirty(inode, 1);
735                         mark_inode_dirty(inode);
736                         mark_buffer_dirty(old_bh);
737                         ea_bdebug(old_bh, "refcount now=%d",
738                                 le32_to_cpu(HDR(old_bh)->h_refcount));
739                 }
740                 unlock_buffer(old_bh);
741         }
742
743 cleanup:
744         brelse(new_bh);
745
746         return error;
747 }
748
749 /*
750  * ext2_xattr_delete_inode()
751  *
752  * Free extended attribute resources associated with this inode. This
753  * is called immediately before an inode is freed.
754  */
755 void
756 ext2_xattr_delete_inode(struct inode *inode)
757 {
758         struct buffer_head *bh = NULL;
759         struct mb_cache_entry *ce;
760
761         down_write(&EXT2_I(inode)->xattr_sem);
762         if (!EXT2_I(inode)->i_file_acl)
763                 goto cleanup;
764         bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
765         if (!bh) {
766                 ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
767                         "inode %ld: block %d read error", inode->i_ino,
768                         EXT2_I(inode)->i_file_acl);
769                 goto cleanup;
770         }
771         ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
772         if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
773             HDR(bh)->h_blocks != cpu_to_le32(1)) {
774                 ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
775                         "inode %ld: bad block %d", inode->i_ino,
776                         EXT2_I(inode)->i_file_acl);
777                 goto cleanup;
778         }
779         ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
780         lock_buffer(bh);
781         if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
782                 if (ce)
783                         mb_cache_entry_free(ce);
784                 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
785                 get_bh(bh);
786                 bforget(bh);
787                 unlock_buffer(bh);
788         } else {
789                 le32_add_cpu(&HDR(bh)->h_refcount, -1);
790                 if (ce)
791                         mb_cache_entry_release(ce);
792                 ea_bdebug(bh, "refcount now=%d",
793                         le32_to_cpu(HDR(bh)->h_refcount));
794                 unlock_buffer(bh);
795                 mark_buffer_dirty(bh);
796                 if (IS_SYNC(inode))
797                         sync_dirty_buffer(bh);
798                 dquot_free_block_nodirty(inode, 1);
799         }
800         EXT2_I(inode)->i_file_acl = 0;
801
802 cleanup:
803         brelse(bh);
804         up_write(&EXT2_I(inode)->xattr_sem);
805 }
806
807 /*
808  * ext2_xattr_put_super()
809  *
810  * This is called when a file system is unmounted.
811  */
812 void
813 ext2_xattr_put_super(struct super_block *sb)
814 {
815         mb_cache_shrink(sb->s_bdev);
816 }
817
818
819 /*
820  * ext2_xattr_cache_insert()
821  *
822  * Create a new entry in the extended attribute cache, and insert
823  * it unless such an entry is already in the cache.
824  *
825  * Returns 0, or a negative error number on failure.
826  */
827 static int
828 ext2_xattr_cache_insert(struct buffer_head *bh)
829 {
830         __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
831         struct mb_cache_entry *ce;
832         int error;
833
834         ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
835         if (!ce)
836                 return -ENOMEM;
837         error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
838         if (error) {
839                 mb_cache_entry_free(ce);
840                 if (error == -EBUSY) {
841                         ea_bdebug(bh, "already in cache (%d cache entries)",
842                                 atomic_read(&ext2_xattr_cache->c_entry_count));
843                         error = 0;
844                 }
845         } else {
846                 ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
847                           atomic_read(&ext2_xattr_cache->c_entry_count));
848                 mb_cache_entry_release(ce);
849         }
850         return error;
851 }
852
853 /*
854  * ext2_xattr_cmp()
855  *
856  * Compare two extended attribute blocks for equality.
857  *
858  * Returns 0 if the blocks are equal, 1 if they differ, and
859  * a negative error number on errors.
860  */
861 static int
862 ext2_xattr_cmp(struct ext2_xattr_header *header1,
863                struct ext2_xattr_header *header2)
864 {
865         struct ext2_xattr_entry *entry1, *entry2;
866
867         entry1 = ENTRY(header1+1);
868         entry2 = ENTRY(header2+1);
869         while (!IS_LAST_ENTRY(entry1)) {
870                 if (IS_LAST_ENTRY(entry2))
871                         return 1;
872                 if (entry1->e_hash != entry2->e_hash ||
873                     entry1->e_name_index != entry2->e_name_index ||
874                     entry1->e_name_len != entry2->e_name_len ||
875                     entry1->e_value_size != entry2->e_value_size ||
876                     memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
877                         return 1;
878                 if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
879                         return -EIO;
880                 if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
881                            (char *)header2 + le16_to_cpu(entry2->e_value_offs),
882                            le32_to_cpu(entry1->e_value_size)))
883                         return 1;
884
885                 entry1 = EXT2_XATTR_NEXT(entry1);
886                 entry2 = EXT2_XATTR_NEXT(entry2);
887         }
888         if (!IS_LAST_ENTRY(entry2))
889                 return 1;
890         return 0;
891 }
892
893 /*
894  * ext2_xattr_cache_find()
895  *
896  * Find an identical extended attribute block.
897  *
898  * Returns a locked buffer head to the block found, or NULL if such
899  * a block was not found or an error occurred.
900  */
901 static struct buffer_head *
902 ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
903 {
904         __u32 hash = le32_to_cpu(header->h_hash);
905         struct mb_cache_entry *ce;
906
907         if (!header->h_hash)
908                 return NULL;  /* never share */
909         ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
910 again:
911         ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
912                                        hash);
913         while (ce) {
914                 struct buffer_head *bh;
915
916                 if (IS_ERR(ce)) {
917                         if (PTR_ERR(ce) == -EAGAIN)
918                                 goto again;
919                         break;
920                 }
921
922                 bh = sb_bread(inode->i_sb, ce->e_block);
923                 if (!bh) {
924                         ext2_error(inode->i_sb, "ext2_xattr_cache_find",
925                                 "inode %ld: block %ld read error",
926                                 inode->i_ino, (unsigned long) ce->e_block);
927                 } else {
928                         lock_buffer(bh);
929                         if (le32_to_cpu(HDR(bh)->h_refcount) >
930                                    EXT2_XATTR_REFCOUNT_MAX) {
931                                 ea_idebug(inode, "block %ld refcount %d>%d",
932                                           (unsigned long) ce->e_block,
933                                           le32_to_cpu(HDR(bh)->h_refcount),
934                                           EXT2_XATTR_REFCOUNT_MAX);
935                         } else if (!ext2_xattr_cmp(header, HDR(bh))) {
936                                 ea_bdebug(bh, "b_count=%d",
937                                           atomic_read(&(bh->b_count)));
938                                 mb_cache_entry_release(ce);
939                                 return bh;
940                         }
941                         unlock_buffer(bh);
942                         brelse(bh);
943                 }
944                 ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
945         }
946         return NULL;
947 }
948
949 #define NAME_HASH_SHIFT 5
950 #define VALUE_HASH_SHIFT 16
951
952 /*
953  * ext2_xattr_hash_entry()
954  *
955  * Compute the hash of an extended attribute.
956  */
957 static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
958                                          struct ext2_xattr_entry *entry)
959 {
960         __u32 hash = 0;
961         char *name = entry->e_name;
962         int n;
963
964         for (n=0; n < entry->e_name_len; n++) {
965                 hash = (hash << NAME_HASH_SHIFT) ^
966                        (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
967                        *name++;
968         }
969
970         if (entry->e_value_block == 0 && entry->e_value_size != 0) {
971                 __le32 *value = (__le32 *)((char *)header +
972                         le16_to_cpu(entry->e_value_offs));
973                 for (n = (le32_to_cpu(entry->e_value_size) +
974                      EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
975                         hash = (hash << VALUE_HASH_SHIFT) ^
976                                (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
977                                le32_to_cpu(*value++);
978                 }
979         }
980         entry->e_hash = cpu_to_le32(hash);
981 }
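/*
 * In other words, the entry hash is a shift-and-xor fold,
 *
 *      hash = (hash << 5) ^ (hash >> 27) ^ <next name byte>
 *
 * over the name bytes, followed by the same fold with a 16-bit shift over
 * the 32-bit words of the padded value.  Each shift pair adds up to 32
 * bits, so the fold acts like a rotation and no hash bits are discarded.
 */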
982
983 #undef NAME_HASH_SHIFT
984 #undef VALUE_HASH_SHIFT
985
986 #define BLOCK_HASH_SHIFT 16
987
988 /*
989  * ext2_xattr_rehash()
990  *
991  * Re-compute the extended attribute hash value after an entry has changed.
992  */
993 static void ext2_xattr_rehash(struct ext2_xattr_header *header,
994                               struct ext2_xattr_entry *entry)
995 {
996         struct ext2_xattr_entry *here;
997         __u32 hash = 0;
998         
999         ext2_xattr_hash_entry(header, entry);
1000         here = ENTRY(header+1);
1001         while (!IS_LAST_ENTRY(here)) {
1002                 if (!here->e_hash) {
1003                         /* Block is not shared if an entry's hash value == 0 */
1004                         hash = 0;
1005                         break;
1006                 }
1007                 hash = (hash << BLOCK_HASH_SHIFT) ^
1008                        (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
1009                        le32_to_cpu(here->e_hash);
1010                 here = EXT2_XATTR_NEXT(here);
1011         }
1012         header->h_hash = cpu_to_le32(hash);
1013 }
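/*
 * The block hash is the analogous 16-bit fold over the individual entry
 * hashes.  An entry with e_hash == 0 forces h_hash to 0, and a zero
 * h_hash makes ext2_xattr_cache_find() refuse to share the block.
 */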
1014
1015 #undef BLOCK_HASH_SHIFT
1016
1017 int __init
1018 init_ext2_xattr(void)
1019 {
1020         ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);
1021         if (!ext2_xattr_cache)
1022                 return -ENOMEM;
1023         return 0;
1024 }
1025
1026 void
1027 exit_ext2_xattr(void)
1028 {
1029         mb_cache_destroy(ext2_xattr_cache);
1030 }