/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>

#include <linux/module.h>

/* hash_long() */
#include <linux/libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <lu_ref.h>
#include <linux/list.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease the reference counter on an object. If the last reference is
 * released, return the object to the cache, unless lu_object_is_dying(o)
 * holds. In the latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site *site;
        struct lu_object *orig;
        cfs_hash_bd_t bd;
        const struct lu_fid *fid;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        /*
         * Till FIDs-on-OST is fully implemented, anonymous objects are
         * possible in OSP. Such an object isn't listed in the site, so
         * we should not remove it from the site.
         */
        fid = lu_object_fid(o);
        if (fid_is_zero(fid)) {
                LASSERT(top->loh_hash.next == NULL
                        && top->loh_hash.pprev == NULL);
                LASSERT(list_empty(&top->loh_lru));
                if (!atomic_dec_and_test(&top->loh_ref))
                        return;
                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
                lu_object_free(env, orig);
                return;
        }

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * Somebody may be waiting for this; currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        wake_up_all(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When the last reference is released, iterate over the object
         * layers, and notify them that the object is no longer busy.
         */
        list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(list_empty(&top->loh_lru));
                list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If the object is dying (will not be cached), remove it
         * from the hash table and LRU.
         *
         * This is done with the hash table and LRU lists locked. As the only
         * way to acquire the first reference to a previously unreferenced
         * object is through hash-table lookup (lu_object_find())
         * or LRU scanning (lu_site_purge()), both of which are done under
         * the hash-table and LRU lock, no race with a concurrent object
         * lookup is possible and we can safely destroy the object below.
         */
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
                cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * The object was already removed from the hash table and LRU above,
         * so it can be killed now.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
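
/*
 * A minimal usage sketch (hypothetical caller; "env", "dev" and "fid" are
 * assumed to be set up elsewhere): every reference obtained through a
 * lookup must eventually be dropped with lu_object_put().
 *
 *        struct lu_object *o;
 *
 *        o = lu_object_find(env, dev, fid, NULL);
 *        if (!IS_ERR(o)) {
 *                ... use the object ...
 *                lu_object_put(env, o);
 *        }
 */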

/**
 * Put an object and don't keep it in the cache. This is a temporary
 * solution for multi-site objects when their layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
        set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
        return lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);

/**
 * Kill the object and take it out of the LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
                cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                list_del_init(&top->loh_lru);
                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                cfs_hash_bd_unlock(obj_hash, &bd, 1);
        }
}
EXPORT_SYMBOL(lu_object_unhash);

/**
 * Allocate a new object.
 *
 * This follows the object creation protocol, described in the comment
 * within the struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        struct list_head *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create the top-level object slice. This will also create
         * the lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        if (IS_ERR(top))
                RETURN(top);
        /*
         * This is the only place where the object fid is assigned. It is
         * constant after this point.
         */
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}
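
/*
 * Illustration of the protocol above (a sketch; names prefixed with "foo_"
 * are hypothetical): each ->loo_object_init() may allocate the slice of
 * the next device down and link it into the compound object, which is why
 * lu_object_alloc() loops until no new slices appear.
 *
 *        static int foo_object_init(const struct lu_env *env,
 *                                   struct lu_object *o,
 *                                   const struct lu_object_conf *conf)
 *        {
 *                struct lu_device *next = foo_next_device(o->lo_dev);
 *                struct lu_object *below;
 *
 *                below = next->ld_ops->ldo_object_alloc(env, o->lo_header,
 *                                                       next);
 *                if (below == NULL)
 *                        return -ENOMEM;
 *                lu_object_add(o, below);
 *                return 0;
 *        }
 */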

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site *site;
        struct lu_object *scan;
        struct list_head *layers;
        struct list_head splice;

        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call the ->loo_object_delete() method to release all
         * resources.
         */
        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice the object layers into a stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splicing is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        INIT_LIST_HEAD(&splice);
        list_splice_init(layers, &splice);
        while (!list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that the object
                 * header lives as long as possible and ->loo_object_free()
                 * methods can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (waitqueue_active(&bkt->lsb_marche_funebre))
                wake_up_all(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        cfs_hash_bd_t bd2;
        struct list_head dispose;
        int did_sth;
        int start;
        int count;
        int bnr;
        int i;

        if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
                RETURN(0);

        INIT_LIST_HEAD(&dispose);
        /*
         * Under the LRU list lock, scan the LRU list and move unreferenced
         * objects to the dispose list, removing them from the LRU and hash
         * table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
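
/*
 * Typical uses (the second form occurs in lu_stack_fini() later in this
 * file; the count of 128 is just an illustrative value):
 *
 *        lu_site_purge(env, site, 128);        free up to 128 cold objects
 *        lu_site_purge(env, site, ~0);         drain the cache completely
 */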

/*
 * Object printing.
 *
 * The code below has to jump through certain hoops to output an object
 * description into a libcfs_debug_msg-based log. The problem is that
 * lu_object_print() composes the object description from strings that are
 * parts of _lines_ of output (i.e., strings that are not terminated by a
 * newline). This doesn't fit very well into the libcfs_debug_msg()
 * interface, which assumes that each message supplied to it is a
 * self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key holding the temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                    LCT_MG_THREAD | LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append the new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
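
/*
 * Sketch of how this printer is driven (assuming the usual
 * LIBCFS_DEBUG_MSG_DATA_DECL() helper from libcfs): lu_object_print()
 * below emits partial lines through the printer, and the buffered chunks
 * are flushed once a '\n' is seen.
 *
 *        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_OTHER, NULL);
 *
 *        lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 */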

/**
 * Print the object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   list_empty((struct list_head *)&hdr->loh_lru) ?
                   "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print a human readable representation of \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{\n");
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by the type name and
                 * address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       wait_queue_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        struct hlist_node *hnode;
        __u64 ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_peek_locked is a somewhat "internal" function
         * of cfs_hash; it doesn't take a reference on the object. */
        hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                cfs_hash_get(s->ls_obj_hash, hnode);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are
         * eventually drained), and moreover, the lookup has to wait until
         * the object is freed.
         */
        init_waitqueue_entry_current(waiter);
        add_wait_queue(&bkt->lsb_marche_funebre, waiter);
        set_current_state(TASK_UNINTERRUPTIBLE);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}

/**
 * Search the cache for an object with the fid \a f. If such an object is
 * found, return it. Otherwise, create a new object, insert it into the
 * cache and return it. In any case, an additional reference is acquired on
 * the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
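
/*
 * Sketch of the lookup-or-create pattern (hypothetical caller). Passing a
 * conf with LOC_F_NEW, when the caller knows the object cannot already
 * exist, makes lu_object_find_try() below skip the initial cache lookup:
 *
 *        struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
 *        struct lu_object *o;
 *
 *        o = lu_object_find(env, dev, fid, &conf);
 *        if (IS_ERR(o))
 *                return PTR_ERR(o);
 *        ...
 *        lu_object_put(env, o);
 */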

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *o;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}

/**
 * Core logic of the lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            wait_queue_t *waiter)
{
        struct lu_object *o;
        struct lu_object *shadow;
        struct lu_site *s;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        __u64 version = 0;

        /*
         * This uses the standard index maintenance protocol:
         *
         *     - search the index under lock, and return the object if found;
         *     - otherwise, unlock the index, allocate a new object;
         *     - lock the index and search again;
         *     - if nothing is found (usual case), insert the newly created
         *       object into the index;
         *     - otherwise (race: another thread inserted the object), free
         *       the object just allocated;
         *     - unlock the index;
         *     - return the object.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly
         * established. It is unnecessary to perform
         * lookup-alloc-lookup-insert; instead, just alloc and insert
         * directly.
         *
         * If a dying object is found during the index search, add @waiter
         * to the site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate a new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}

/**
 * Much like lu_object_find(), but the top level device of the object is
 * specifically \a dev rather than the top level device of the site. This
 * interface allows objects with different "stacking" to be created within
 * the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object *obj;
        wait_queue_t wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added the waiter to the
                 * wait queue.
                 */
                waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find the object with the given fid, and return its slice belonging to
 * the given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);

/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result = 0;

        INIT_LIST_HEAD(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_init)
                result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        list_del_init(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_fini)
                ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node.
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by the site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env *lsp_env;
        void *lsp_cookie;
        lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  struct hlist_node *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");

/**
 * Return the desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate the hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by the cache of
         * lu_objects.
         *
         * The size of an lu_object is (arbitrarily) taken as 1K (together
         * with the inode).
         */
        cache_size = totalram_pages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM: cap the
         * cache at 3/4 of 1GB worth of pages (note the parentheses:
         * '<<' binds more loosely than '*') */
        if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
                cache_size = (1 << (30 - PAGE_CACHE_SHIFT)) * 3 / 4;
#endif

        /* reject unreasonable cache settings. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (PAGE_CACHE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
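
/*
 * Worked example (assuming 4GB of RAM and 4KB pages, i.e.
 * totalram_pages = 2^20, and the default lu_cache_percent of 20):
 * cache_size = 2^20 / 100 * 20 * (4096 / 1024) = 838800 objects, and the
 * smallest "bits" with (1 << bits) >= 838800 is 20, so the site hash table
 * is sized for 2^20 entries (before clamping in lu_site_init()).
 */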

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid *fid = (struct lu_fid *)key;
        __u32 hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}

static void *lu_obj_hop_object(struct hlist_node *hnode)
{
        return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
        LBUG(); /* this should never be called */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash       = lu_obj_hop_hash,
        .hs_key        = lu_obj_hop_key,
        .hs_keycmp     = lu_obj_hop_keycmp,
        .hs_object     = lu_obj_hop_object,
        .hs_get        = lu_obj_hop_get,
        .hs_put_locked = lu_obj_hop_put_locked,
};

void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        if (list_empty(&d->ld_linkage))
                list_add(&d->ld_linkage, &s->ls_ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        list_del_init(&d->ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);

/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                INIT_LIST_HEAD(&bkt->lsb_lru);
                init_waitqueue_head(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        INIT_LIST_HEAD(&s->ls_ld_linkage);
        spin_lock_init(&s->ls_ld_lock);

        lu_dev_add_linkage(s, top);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        mutex_lock(&lu_sites_guard);
        list_del_init(&s->ls_linkage);
        mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of the stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                list_add(&s->ls_linkage, &lu_sites);
        mutex_unlock(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire an additional reference on device \a d.
 */
void lu_device_get(struct lu_device *d)
{
        atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release a reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(atomic_read(&d->ld_ref) > 0);
        atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was
 * created by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize the object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as the first layer of compound object \a h.
 *
 * This is typically called by the ->ldo_object_alloc() method of the
 * top-level device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of a compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize a compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        atomic_set(&h->loh_ref, 1);
        INIT_HLIST_NODE(&h->loh_hash);
        INIT_LIST_HEAD(&h->loh_lru);
        INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize a compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(list_empty(&h->loh_layers));
        LASSERT(list_empty(&h->loh_lru));
        LASSERT(hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize the device stack by purging the object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the
 * stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and
 * shutdown are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register a new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);

        result = -ENFILE;
        spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);
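
/*
 * Sketch of the usual key declaration pattern (cf. lu_global_key earlier
 * in this file; the "foo" names are hypothetical). LU_KEY_INIT_FINI()
 * generates foo_key_init()/foo_key_fini() for the value type:
 *
 *        LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *        struct lu_context_key foo_thread_key = {
 *                .lct_tags = LCT_MD_THREAD,
 *                .lct_init = foo_key_init,
 *                .lct_fini = foo_key_fini
 *        };
 *
 *        rc = lu_context_key_register(&foo_thread_key);
 */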
1336
1337 static void key_fini(struct lu_context *ctx, int index)
1338 {
1339         if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1340                 struct lu_context_key *key;
1341
1342                 key = lu_keys[index];
1343                 LASSERT(key != NULL);
1344                 LASSERT(key->lct_fini != NULL);
1345                 LASSERT(atomic_read(&key->lct_used) > 1);
1346
1347                 key->lct_fini(ctx, key, ctx->lc_value[index]);
1348                 lu_ref_del(&key->lct_reference, "ctx", ctx);
1349                 atomic_dec(&key->lct_used);
1350
1351                 if ((ctx->lc_tags & LCT_NOREF) == 0) {
1352 #ifdef CONFIG_MODULE_UNLOAD
1353                         LINVRNT(module_refcount(key->lct_owner) > 0);
1354 #endif
1355                         module_put(key->lct_owner);
1356                 }
1357                 ctx->lc_value[index] = NULL;
1358         }
1359 }
1360
1361 /**
1362  * Deregister key.
1363  */
1364 void lu_context_key_degister(struct lu_context_key *key)
1365 {
1366         LASSERT(atomic_read(&key->lct_used) >= 1);
1367         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1368
1369         lu_context_key_quiesce(key);
1370
1371         ++key_set_version;
1372         spin_lock(&lu_keys_guard);
1373         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1374         if (lu_keys[key->lct_index]) {
1375                 lu_keys[key->lct_index] = NULL;
1376                 lu_ref_fini(&key->lct_reference);
1377         }
1378         spin_unlock(&lu_keys_guard);
1379
1380         LASSERTF(atomic_read(&key->lct_used) == 1,
1381                  "key has instances: %d\n",
1382                  atomic_read(&key->lct_used));
1383 }
1384 EXPORT_SYMBOL(lu_context_key_degister);
1385
1386 /**
1387  * Register a number of keys. This has to be called after all keys have been
1388  * initialized by a call to LU_CONTEXT_KEY_INIT().
1389  */
1390 int lu_context_key_register_many(struct lu_context_key *k, ...)
1391 {
1392         struct lu_context_key *key = k;
1393         va_list args;
1394         int result;
1395
1396         va_start(args, k);
1397         do {
1398                 result = lu_context_key_register(key);
1399                 if (result)
1400                         break;
1401                 key = va_arg(args, struct lu_context_key *);
1402         } while (key != NULL);
1403         va_end(args);
1404
1405         if (result != 0) {
1406                 va_start(args, k);
1407                 while (k != key) {
1408                         lu_context_key_degister(k);
1409                         k = va_arg(args, struct lu_context_key *);
1410                 }
1411                 va_end(args);
1412         }
1413
1414         return result;
1415 }
1416 EXPORT_SYMBOL(lu_context_key_register_many);
1417
1418 /**
1419  * De-register a number of keys. This is a dual to
1420  * lu_context_key_register_many().
1421  */
1422 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1423 {
1424         va_list args;
1425
1426         va_start(args, k);
1427         do {
1428                 lu_context_key_degister(k);
1429                 k = va_arg(args, struct lu_context_key*);
1430         } while (k != NULL);
1431         va_end(args);
1432 }
1433 EXPORT_SYMBOL(lu_context_key_degister_many);
1434
1435 /**
1436  * Revive a number of keys.
1437  */
1438 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1439 {
1440         va_list args;
1441
1442         va_start(args, k);
1443         do {
1444                 lu_context_key_revive(k);
1445                 k = va_arg(args, struct lu_context_key*);
1446         } while (k != NULL);
1447         va_end(args);
1448 }
1449 EXPORT_SYMBOL(lu_context_key_revive_many);
1450
1451 /**
1452  * Quiescent a number of keys.
1453  */
1454 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1455 {
1456         va_list args;
1457
1458         va_start(args, k);
1459         do {
1460                 lu_context_key_quiesce(k);
1461                 k = va_arg(args, struct lu_context_key*);
1462         } while (k != NULL);
1463         va_end(args);
1464 }
1465 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1466
1467 /**
1468  * Return value associated with key \a key in context \a ctx.
1469  */
1470 void *lu_context_key_get(const struct lu_context *ctx,
1471                          const struct lu_context_key *key)
1472 {
1473         LINVRNT(ctx->lc_state == LCS_ENTERED);
1474         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1475         LASSERT(lu_keys[key->lct_index] == key);
1476         return ctx->lc_value[key->lct_index];
1477 }
1478 EXPORT_SYMBOL(lu_context_key_get);
1479
1480 /**
1481  * List of remembered contexts. XXX document me.
1482  */
1483 static LIST_HEAD(lu_context_remembered);
1484
1485 /**
1486  * Destroy \a key in all remembered contexts. This is used to destroy key
1487  * values in "shared" contexts (like service threads), when a module owning
1488  * the key is about to be unloaded.
1489  */
1490 void lu_context_key_quiesce(struct lu_context_key *key)
1491 {
1492         struct lu_context *ctx;
1493
1494         if (!(key->lct_tags & LCT_QUIESCENT)) {
1495                 /*
1496                  * XXX layering violation.
1497                  */
1498                 key->lct_tags |= LCT_QUIESCENT;
1499                 /*
1500                  * XXX memory barrier has to go here.
1501                  */
1502                 spin_lock(&lu_keys_guard);
1503                 list_for_each_entry(ctx, &lu_context_remembered,
1504                                         lc_remember)
1505                         key_fini(ctx, key->lct_index);
1506                 spin_unlock(&lu_keys_guard);
1507                 ++key_set_version;
1508         }
1509 }
1510 EXPORT_SYMBOL(lu_context_key_quiesce);
1511
1512 void lu_context_key_revive(struct lu_context_key *key)
1513 {
1514         key->lct_tags &= ~LCT_QUIESCENT;
1515         ++key_set_version;
1516 }
1517 EXPORT_SYMBOL(lu_context_key_revive);
1518
1519 static void keys_fini(struct lu_context *ctx)
1520 {
1521         int     i;
1522
1523         if (ctx->lc_value == NULL)
1524                 return;
1525
1526         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1527                 key_fini(ctx, i);
1528
1529         OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1530         ctx->lc_value = NULL;
1531 }
1532
1533 static int keys_fill(struct lu_context *ctx)
1534 {
1535         int i;
1536
1537         LINVRNT(ctx->lc_value != NULL);
1538         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1539                 struct lu_context_key *key;
1540
1541                 key = lu_keys[i];
1542                 if (ctx->lc_value[i] == NULL && key != NULL &&
1543                     (key->lct_tags & ctx->lc_tags) &&
1544                     /*
1545                      * Don't create values for a LCT_QUIESCENT key, as this
1546                      * will pin module owning a key.
1547                      */
1548                     !(key->lct_tags & LCT_QUIESCENT)) {
1549                         void *value;
1550
1551                         LINVRNT(key->lct_init != NULL);
1552                         LINVRNT(key->lct_index == i);
1553
1554                         value = key->lct_init(ctx, key);
1555                         if (unlikely(IS_ERR(value)))
1556                                 return PTR_ERR(value);
1557
1558                         if (!(ctx->lc_tags & LCT_NOREF))
1559                                 try_module_get(key->lct_owner);
1560                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1561                         atomic_inc(&key->lct_used);
1562                         /*
1563                          * This is the only place in the code where an
1564                          * element of the ctx->lc_value[] array is set to
1565                          * a non-NULL value.
1566                          */
1567                         ctx->lc_value[i] = value;
1568                         if (key->lct_exit != NULL)
1569                                 ctx->lc_tags |= LCT_HAS_EXIT;
1570                 }
1571                 ctx->lc_version = key_set_version;
1572         }
1573         return 0;
1574 }
1575
1576 static int keys_init(struct lu_context *ctx)
1577 {
1578         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1579         if (likely(ctx->lc_value != NULL))
1580                 return keys_fill(ctx);
1581
1582         return -ENOMEM;
1583 }
1584
1585 /**
1586  * Initialize context data-structure. Create values for all keys.
1587  */
1588 int lu_context_init(struct lu_context *ctx, __u32 tags)
1589 {
1590         int     rc;
1591
1592         memset(ctx, 0, sizeof *ctx);
1593         ctx->lc_state = LCS_INITIALIZED;
1594         ctx->lc_tags = tags;
1595         if (tags & LCT_REMEMBER) {
1596                 spin_lock(&lu_keys_guard);
1597                 list_add(&ctx->lc_remember, &lu_context_remembered);
1598                 spin_unlock(&lu_keys_guard);
1599         } else {
1600                 INIT_LIST_HEAD(&ctx->lc_remember);
1601         }
1602
1603         rc = keys_init(ctx);
1604         if (rc != 0)
1605                 lu_context_fini(ctx);
1606
1607         return rc;
1608 }
1609 EXPORT_SYMBOL(lu_context_init);
1610
1611 /**
1612  * Finalize context data-structure. Destroy key values.
1613  */
1614 void lu_context_fini(struct lu_context *ctx)
1615 {
1616         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1617         ctx->lc_state = LCS_FINALIZED;
1618
1619         if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1620                 LASSERT(list_empty(&ctx->lc_remember));
1621                 keys_fini(ctx);
1622
1623         } else { /* could race with key degister */
1624                 spin_lock(&lu_keys_guard);
1625                 keys_fini(ctx);
1626                 list_del_init(&ctx->lc_remember);
1627                 spin_unlock(&lu_keys_guard);
1628         }
1629 }
1630 EXPORT_SYMBOL(lu_context_fini);
1631
1632 /**
1633  * Called before entering context.
1634  */
1635 void lu_context_enter(struct lu_context *ctx)
1636 {
1637         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1638         ctx->lc_state = LCS_ENTERED;
1639 }
1640 EXPORT_SYMBOL(lu_context_enter);
1641
1642 /**
1643  * Called after exiting from \a ctx.
1644  */
1645 void lu_context_exit(struct lu_context *ctx)
1646 {
1647         int i;
1648
1649         LINVRNT(ctx->lc_state == LCS_ENTERED);
1650         ctx->lc_state = LCS_LEFT;
1651         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1652                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1653                         if (ctx->lc_value[i] != NULL) {
1654                                 struct lu_context_key *key;
1655
1656                                 key = lu_keys[i];
1657                                 LASSERT(key != NULL);
1658                                 if (key->lct_exit != NULL)
1659                                         key->lct_exit(ctx,
1660                                                       key, ctx->lc_value[i]);
1661                         }
1662                 }
1663         }
1664 }
1665 EXPORT_SYMBOL(lu_context_exit);
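/*
 * The LINVRNT state checks in the functions above enforce this life
 * cycle (sketch; LCT_MD_THREAD is just an example tag):
 *
 *      struct lu_context ctx;
 *      int rc;
 *
 *      rc = lu_context_init(&ctx, LCT_MD_THREAD);
 *      if (rc == 0) {
 *              lu_context_enter(&ctx);
 *              ... key values may be used here ...
 *              lu_context_exit(&ctx);
 *              lu_context_fini(&ctx);
 *      }
 *
 * i.e. LCS_INITIALIZED -> LCS_ENTERED -> LCS_LEFT -> LCS_FINALIZED,
 * with enter/exit possibly repeated before the final fini.
 */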
1666
1667 /**
1668  * Allocate, for context \a ctx, values of all keys that were registered
1669  * after the context was created. key_set_version only changes in the rare
1670  * cases when modules are loaded or removed.
1671  */
1672 int lu_context_refill(struct lu_context *ctx)
1673 {
1674         return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
1675 }
1676 EXPORT_SYMBOL(lu_context_refill);
1677
1678 /**
1679  * lu_context_tags_default/lu_session_tags_default are updated when new
1680  * types of obd are added. Currently this is only used on the client side,
1681  * specifically for the echo device client. For other stacks (like ptlrpc
1682  * threads), contexts are predefined when the lu_device type is registered,
1683  * during the module probe phase.
1684  */
1685 __u32 lu_context_tags_default = 0;
1686 __u32 lu_session_tags_default = 0;
1687
1688 void lu_context_tags_update(__u32 tags)
1689 {
1690         spin_lock(&lu_keys_guard);
1691         lu_context_tags_default |= tags;
1692         key_set_version++;
1693         spin_unlock(&lu_keys_guard);
1694 }
1695 EXPORT_SYMBOL(lu_context_tags_update);
1696
1697 void lu_context_tags_clear(__u32 tags)
1698 {
1699         spin_lock(&lu_keys_guard);
1700         lu_context_tags_default &= ~tags;
1701         key_set_version++;
1702         spin_unlock(&lu_keys_guard);
1703 }
1704 EXPORT_SYMBOL(lu_context_tags_clear);
1705
1706 void lu_session_tags_update(__u32 tags)
1707 {
1708         spin_lock(&lu_keys_guard);
1709         lu_session_tags_default |= tags;
1710         key_set_version++;
1711         spin_unlock(&lu_keys_guard);
1712 }
1713 EXPORT_SYMBOL(lu_session_tags_update);
1714
1715 void lu_session_tags_clear(__u32 tags)
1716 {
1717         spin_lock(&lu_keys_guard);
1718         lu_session_tags_default &= ~tags;
1719         key_set_version++;
1720         spin_unlock(&lu_keys_guard);
1721 }
1722 EXPORT_SYMBOL(lu_session_tags_clear);
1723
1724 int lu_env_init(struct lu_env *env, __u32 tags)
1725 {
1726         int result;
1727
1728         env->le_ses = NULL;
1729         result = lu_context_init(&env->le_ctx, tags);
1730         if (likely(result == 0))
1731                 lu_context_enter(&env->le_ctx);
1732         return result;
1733 }
1734 EXPORT_SYMBOL(lu_env_init);
1735
1736 void lu_env_fini(struct lu_env *env)
1737 {
1738         lu_context_exit(&env->le_ctx);
1739         lu_context_fini(&env->le_ctx);
1740         env->le_ses = NULL;
1741 }
1742 EXPORT_SYMBOL(lu_env_fini);
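/*
 * A lu_env couples a thread context with an optional session context
 * (le_ses). Minimal pairing sketch (LCT_LOCAL is an example tag):
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, LCT_LOCAL);
 *      if (rc == 0) {
 *              ... use env.le_ctx, e.g. via lu_context_key_get() ...
 *              lu_env_fini(&env);
 *      }
 */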
1743
1744 int lu_env_refill(struct lu_env *env)
1745 {
1746         int result;
1747
1748         result = lu_context_refill(&env->le_ctx);
1749         if (result == 0 && env->le_ses != NULL)
1750                 result = lu_context_refill(env->le_ses);
1751         return result;
1752 }
1753 EXPORT_SYMBOL(lu_env_refill);
1754
1755 /**
1756  * Currently, this API is only used by the echo client. Because the echo
1757  * client and the normal lustre client share the same cl_env cache, the
1758  * echo client needs to refresh the env context after it gets one from
1759  * the cache, especially when the normal client and the echo client
1760  * co-exist on the same node.
1761  */
1762 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1763                           __u32 stags)
1764 {
1765         int    result;
1766
1767         if ((env->le_ctx.lc_tags & ctags) != ctags) {
1768                 env->le_ctx.lc_version = 0;
1769                 env->le_ctx.lc_tags |= ctags;
1770         }
1771
1772         if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1773                 env->le_ses->lc_version = 0;
1774                 env->le_ses->lc_tags |= stags;
1775         }
1776
1777         result = lu_env_refill(env);
1778
1779         return result;
1780 }
1781 EXPORT_SYMBOL(lu_env_refill_by_tags);
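/*
 * Illustrative call (the tags are examples only): widen a cached env so
 * its thread context also carries LCT_DT_THREAD values and its session
 * carries LCT_SESSION values, then refill both:
 *
 *      rc = lu_env_refill_by_tags(env, LCT_DT_THREAD, LCT_SESSION);
 */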
1782
1783 static struct shrinker *lu_site_shrinker = NULL;
1784
1785 typedef struct lu_site_stats {
1786         unsigned        lss_populated;
1787         unsigned        lss_max_search;
1788         unsigned        lss_total;
1789         unsigned        lss_busy;
1790 } lu_site_stats_t;
1791
1792 static void lu_site_stats_get(cfs_hash_t *hs,
1793                               lu_site_stats_t *stats, int populated)
1794 {
1795         cfs_hash_bd_t bd;
1796         int        i;
1797
1798         cfs_hash_for_each_bucket(hs, &bd, i) {
1799                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1800                 struct hlist_head       *hhead;
1801
1802                 cfs_hash_bd_lock(hs, &bd, 1);
1803                 stats->lss_busy  += bkt->lsb_busy;
1804                 stats->lss_total += cfs_hash_bd_count_get(&bd);
1805                 stats->lss_max_search = max((int)stats->lss_max_search,
1806                                             cfs_hash_bd_depmax_get(&bd));
1807                 if (!populated) {
1808                         cfs_hash_bd_unlock(hs, &bd, 1);
1809                         continue;
1810                 }
1811
1812                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1813                         if (!hlist_empty(hhead))
1814                                 stats->lss_populated++;
1815                 }
1816                 cfs_hash_bd_unlock(hs, &bd, 1);
1817         }
1818 }
1819
1820
1821 /*
1822  * There exists a potential lock inversion deadlock scenario when using
1823  * Lustre on top of ZFS. This occurs between one of ZFS's
1824  * buf_hash_table.ht_lock locks and Lustre's lu_sites_guard lock. Essentially,
1825  * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
1826  * while thread B will take the ht_lock and sleep on the lu_sites_guard
1827  * lock. Obviously neither thread will wake and drop their respective hold
1828  * on their lock.
1829  *
1830  * To prevent this from happening, we must ensure the lu_sites_guard lock
1831  * is not taken anywhere on this code path. ZFS reliably does not set the
1832  * __GFP_FS bit in its code paths, so this can be used to determine if it
1833  * is safe to take the lu_sites_guard lock.
1834  *
1835  * Ideally we should accurately return the remaining number of cached
1836  * objects without taking the lu_sites_guard lock, but this is not
1837  * possible in the current implementation.
1838  */
1839 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1840 {
1841         lu_site_stats_t stats;
1842         struct lu_site *s;
1843         struct lu_site *tmp;
1844         int cached = 0;
1845         int remain = shrink_param(sc, nr_to_scan);
1846         LIST_HEAD(splice);
1847
1848         if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
1849                 if (remain != 0)
1850                         return -1;
1851                 else
1852                         /* We must not take the lu_sites_guard lock when
1853                          * __GFP_FS is *not* set because of the deadlock
1854                          * possibility detailed above. Additionally,
1855                          * since we cannot determine the number of
1856                          * objects in the cache without taking this
1857                          * lock, we're in a particularly tough spot. As
1858                          * a result, we'll just lie and say our cache is
1859                          * empty. This _should_ be ok, as we can't
1860                          * reclaim objects when __GFP_FS is *not* set
1861                          * anyways.
1862                          */
1863                         return 0;
1864         }
1865
1866         CDEBUG(D_INODE, "Shrink %d objects\n", remain);
1867
1868         mutex_lock(&lu_sites_guard);
1869         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1870                 if (shrink_param(sc, nr_to_scan) != 0) {
1871                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1872                         /*
1873                          * Move just shrunk site to the tail of site list to
1874                          * assure shrinking fairness.
1875                          */
1876                         list_move_tail(&s->ls_linkage, &splice);
1877                 }
1878
1879                 memset(&stats, 0, sizeof(stats));
1880                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1881                 cached += stats.lss_total - stats.lss_busy;
1882                 if (shrink_param(sc, nr_to_scan) && remain <= 0)
1883                         break;
1884         }
1885         list_splice(&splice, lu_sites.prev);
1886         mutex_unlock(&lu_sites_guard);
1887
1888         cached = (cached / 100) * sysctl_vfs_cache_pressure;
1889         if (shrink_param(sc, nr_to_scan) == 0)
1890                 CDEBUG(D_INODE, "%d objects cached\n", cached);
1891         return cached;
1892 }
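/*
 * Note on the shrinker protocol assumed above: a nr_to_scan of zero is
 * a pure query, so only the weighted count of reclaimable objects is
 * reported; returning -1 asks the VM to back off and retry when it can
 * pass a gfp mask that includes __GFP_FS.
 */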
1893
1894 /*
1895  * Debugging stuff.
1896  */
1897
1898 /**
1899  * Environment to be used in a debugger; contains all tags.
1900  */
1901 struct lu_env lu_debugging_env;
1902
1903 /**
1904  * Debugging printer function using printk().
1905  */
1906 int lu_printk_printer(const struct lu_env *env,
1907                       void *unused, const char *format, ...)
1908 {
1909         va_list args;
1910
1911         va_start(args, format);
1912         vprintk(format, args);
1913         va_end(args);
1914         return 0;
1915 }
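/*
 * lu_printk_printer() matches the lu_printer_t signature, so from a
 * debugger it can be handed to the generic object dumpers, e.g.
 * (sketch):
 *
 *      lu_object_print(env, NULL, lu_printk_printer, o);
 */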
1916
1917 /**
1918  * Initialization of global lu_* data.
1919  */
1920 int lu_global_init(void)
1921 {
1922         int result;
1923
1924         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
1925
1926         result = lu_ref_global_init();
1927         if (result != 0)
1928                 return result;
1929
1930         LU_CONTEXT_KEY_INIT(&lu_global_key);
1931         result = lu_context_key_register(&lu_global_key);
1932         if (result != 0)
1933                 return result;
1934
1935         /*
1936          * At this level, we don't know what tags are needed, so allocate them
1937          * conservatively. This should not be too bad, because this
1938          * environment is global.
1939          */
1940         mutex_lock(&lu_sites_guard);
1941         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1942         mutex_unlock(&lu_sites_guard);
1943         if (result != 0)
1944                 return result;
1945
1946         /*
1947          * seeks estimation: 3 seeks to read a record from oi, one to read
1948  * inode, one for ea. Unfortunately, setting this value high results in
1949  * the lu_object/inode cache consuming all the memory.
1950          */
1951         lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
1952         if (lu_site_shrinker == NULL)
1953                 return -ENOMEM;
1954
1955         return result;
1956 }
1957
1958 /**
1959  * Dual to lu_global_init().
1960  */
1961 void lu_global_fini(void)
1962 {
1963         if (lu_site_shrinker != NULL) {
1964                 remove_shrinker(lu_site_shrinker);
1965                 lu_site_shrinker = NULL;
1966         }
1967
1968         lu_context_key_degister(&lu_global_key);
1969
1970         /*
1971          * Tear shrinker environment down _after_ de-registering
1972          * lu_global_key, because the latter has a value in the former.
1973          */
1974         mutex_lock(&lu_sites_guard);
1975         lu_env_fini(&lu_shrink_env);
1976         mutex_unlock(&lu_sites_guard);
1977
1978         lu_ref_global_fini();
1979 }
1980
1981 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1982 {
1983 #ifdef LPROCFS
1984         struct lprocfs_counter ret;
1985
1986         lprocfs_stats_collect(stats, idx, &ret);
1987         return (__u32)ret.lc_count;
1988 #else
1989         return 0;
1990 #endif
1991 }
1992
1993 /**
1994  * Output site statistical counters into a buffer. Suitable for
1995  * lprocfs_rd_*()-style functions.
1996  */
1997 int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
1998 {
1999         lu_site_stats_t stats;
2000
2001         memset(&stats, 0, sizeof(stats));
2002         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
2003
2004         return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
2005                         stats.lss_busy,
2006                         stats.lss_total,
2007                         stats.lss_populated,
2008                         CFS_HASH_NHLIST(s->ls_obj_hash),
2009                         stats.lss_max_search,
2010                         ls_stats_read(s->ls_stats, LU_SS_CREATED),
2011                         ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
2012                         ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
2013                         ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
2014                         ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
2015                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
2016 }
2017 EXPORT_SYMBOL(lu_site_stats_print);
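/*
 * The columns printed above are, in order: busy/total objects,
 * populated/total hash chains, the deepest chain searched, and then
 * the created, cache_hit, cache_miss, cache_race, cache_death_race and
 * lru_purged counters.
 */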
2018
2019 /**
2020  * Helper function to initialize a number of kmem slab caches at once.
2021  */
2022 int lu_kmem_init(struct lu_kmem_descr *caches)
2023 {
2024         int result;
2025         struct lu_kmem_descr *iter = caches;
2026
2027         for (result = 0; iter->ckd_cache != NULL; ++iter) {
2028                 *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
2029                                                         iter->ckd_size,
2030                                                         0, 0, NULL);
2031                 if (*iter->ckd_cache == NULL) {
2032                         result = -ENOMEM;
2033                         /* free all previously allocated caches */
2034                         lu_kmem_fini(caches);
2035                         break;
2036                 }
2037         }
2038         return result;
2039 }
2040 EXPORT_SYMBOL(lu_kmem_init);
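/*
 * A sketch of the descriptor array these helpers consume (the names
 * and object type are hypothetical; the NULL ckd_cache terminator is
 * required):
 *
 *      static struct kmem_cache *foo_object_kmem;
 *
 *      static struct lu_kmem_descr foo_caches[] = {
 *              {
 *                      .ckd_cache = &foo_object_kmem,
 *                      .ckd_name  = "foo_object_kmem",
 *                      .ckd_size  = sizeof(struct foo_object)
 *              },
 *              {
 *                      .ckd_cache = NULL
 *              }
 *      };
 *
 * lu_kmem_init(foo_caches) then creates every cache, and
 * lu_kmem_fini(foo_caches) destroys them (see below).
 */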
2041
2042 /**
2043  * Helper function to finalize a number of kmem slab caches at once. Dual to
2044  * lu_kmem_init().
2045  */
2046 void lu_kmem_fini(struct lu_kmem_descr *caches)
2047 {
2048         for (; caches->ckd_cache != NULL; ++caches) {
2049                 if (*caches->ckd_cache != NULL) {
2050                         kmem_cache_destroy(*caches->ckd_cache);
2051                         *caches->ckd_cache = NULL;
2052                 }
2053         }
2054 }
2055 EXPORT_SYMBOL(lu_kmem_fini);
2056
2057 /**
2058  * Temporary solution to be able to assign fid in ->do_create()
2059  * till we have fully-functional OST fids
2060  */
2061 void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
2062                           const struct lu_fid *fid)
2063 {
2064         struct lu_site          *s = o->lo_dev->ld_site;
2065         struct lu_fid           *old = &o->lo_header->loh_fid;
2066         struct lu_site_bkt_data *bkt;
2067         struct lu_object        *shadow;
2068         wait_queue_t             waiter;
2069         cfs_hash_t              *hs;
2070         cfs_hash_bd_t            bd;
2071         __u64                    version = 0;
2072
2073         LASSERT(fid_is_zero(old));
2074
2075         hs = s->ls_obj_hash;
2076         cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
2077         shadow = htable_lookup(s, &bd, fid, &waiter, &version);
2078         /* supposed to be unique */
2079         LASSERT(shadow == NULL);
2080         *old = *fid;
2081         bkt = cfs_hash_bd_extra_get(hs, &bd);
2082         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
2083         bkt->lsb_busy++;
2084         cfs_hash_bd_unlock(hs, &bd, 1);
2085 }
2086 EXPORT_SYMBOL(lu_object_assign_fid);
2087
2088 /**
2089  * Allocates an object with a zero (non-assigned) fid.
2090  * XXX: temporary solution to be able to assign fid in ->do_create()
2091  *      till we have fully-functional OST fids
2092  */
2093 struct lu_object *lu_object_anon(const struct lu_env *env,
2094                                  struct lu_device *dev,
2095                                  const struct lu_object_conf *conf)
2096 {
2097         struct lu_fid     fid;
2098         struct lu_object *o;
2099
2100         fid_zero(&fid);
2101         o = lu_object_alloc(env, dev, &fid, conf);
2102
2103         return o;
2104 }
2105 EXPORT_SYMBOL(lu_object_anon);
2106
2107 struct lu_buf LU_BUF_NULL = {
2108         .lb_buf = NULL,
2109         .lb_len = 0
2110 };
2111 EXPORT_SYMBOL(LU_BUF_NULL);
2112
2113 void lu_buf_free(struct lu_buf *buf)
2114 {
2115         LASSERT(buf);
2116         if (buf->lb_buf) {
2117                 LASSERT(buf->lb_len > 0);
2118                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2119                 buf->lb_buf = NULL;
2120                 buf->lb_len = 0;
2121         }
2122 }
2123 EXPORT_SYMBOL(lu_buf_free);
2124
2125 void lu_buf_alloc(struct lu_buf *buf, int size)
2126 {
2127         LASSERT(buf);
2128         LASSERT(buf->lb_buf == NULL);
2129         LASSERT(buf->lb_len == 0);
2130         OBD_ALLOC_LARGE(buf->lb_buf, size);
2131         if (likely(buf->lb_buf))
2132                 buf->lb_len = size;
2133 }
2134 EXPORT_SYMBOL(lu_buf_alloc);
2135
2136 void lu_buf_realloc(struct lu_buf *buf, int size)
2137 {
2138         lu_buf_free(buf);
2139         lu_buf_alloc(buf, size);
2140 }
2141 EXPORT_SYMBOL(lu_buf_realloc);
2142
2143 struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
2144 {
2145         if (buf->lb_buf == NULL && buf->lb_len == 0)
2146                 lu_buf_alloc(buf, len);
2147
2148         if ((len > buf->lb_len) && (buf->lb_buf != NULL))
2149                 lu_buf_realloc(buf, len);
2150
2151         return buf;
2152 }
2153 EXPORT_SYMBOL(lu_buf_check_and_alloc);
2154
2155 /**
2156  * Increase the size of the \a buf.
2157  * Preserves the old data in the buffer.
2158  * The old buffer remains unchanged on error.
2159  * \retval 0 or -ENOMEM
2160  */
2161 int lu_buf_check_and_grow(struct lu_buf *buf, int len)
2162 {
2163         char *ptr;
2164
2165         if (len <= buf->lb_len)
2166                 return 0;
2167
2168         OBD_ALLOC_LARGE(ptr, len);
2169         if (ptr == NULL)
2170                 return -ENOMEM;
2171
2172         /* Free the old buf */
2173         if (buf->lb_buf != NULL) {
2174                 memcpy(ptr, buf->lb_buf, buf->lb_len);
2175                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2176         }
2177
2178         buf->lb_buf = ptr;
2179         buf->lb_len = len;
2180         return 0;
2181 }
2182 EXPORT_SYMBOL(lu_buf_check_and_grow);
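/*
 * Typical lu_buf life cycle (sizes are illustrative): start from
 * LU_BUF_NULL, let the helpers size the buffer on demand, and free it
 * exactly once at the end:
 *
 *      struct lu_buf buf = LU_BUF_NULL;
 *      int rc;
 *
 *      lu_buf_check_and_alloc(&buf, 4096);
 *      if (buf.lb_buf != NULL) {
 *              ... fill buf.lb_buf ...
 *              rc = lu_buf_check_and_grow(&buf, 8192);
 *      }
 *      lu_buf_free(&buf);
 */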