/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg);
/*
 * Comment out osc_page_protected because it may sleep inside
 * client_obd_list_lock.
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
			       const struct osc_page *opg,
			       enum cl_lock_mode mode, int pending, int unref)
{
	struct cl_page	       *page;
	struct osc_object      *obj;
	struct osc_thread_info *info;
	struct ldlm_res_id     *resname;
	struct lustre_handle   *lockh;
	ldlm_policy_data_t     *policy;
	ldlm_mode_t		dlmmode;
	__u64			flags;

	might_sleep();

	info = osc_env_info(env);
	resname = &info->oti_resname;
	policy = &info->oti_policy;
	lockh = &info->oti_handle;
	page = opg->ops_cl.cpl_page;
	obj = cl2osc(opg->ops_cl.cpl_obj);

	flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
	if (pending)
		flags |= LDLM_FL_CBPENDING;

	dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
	osc_lock_build_res(env, obj, resname);
	osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
	return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
			      dlmmode, &flags, NULL, lockh, unref);
}
/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	struct cl_object_header *hdr;
	struct cl_lock		*scan;
	struct cl_page		*page;
	struct cl_lock_descr	*descr;
	int			 result;

	LINVRNT(!opg->ops_temp);
	page = opg->ops_cl.cpl_page;
	if (page->cp_owner != NULL &&
	    cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
		/*
		 * If IO is done without locks (liblustre, or lloop), lock is
		 * not required.
		 */
		result = 1;
	else
		/* otherwise check for a DLM lock */
		result = osc_page_is_dlocked(env, opg, mode, 1, unref);
	if (result == 0) {
		/* maybe this page is a part of a lockless io? */
		hdr = cl_object_header(opg->ops_cl.cpl_obj);
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_mode = mode;
		descr->cld_start = page->cp_index;
		descr->cld_end   = page->cp_index;
		spin_lock(&hdr->coh_lock_guard);
		list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
			/*
			 * Lock-less sub-lock has to be either in HELD state
			 * (when io is actively going on), or in CACHED state,
			 * when top-lock is being unlocked:
			 * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
			 */
			if ((scan->cll_state == CLS_HELD ||
			     scan->cll_state == CLS_CACHED) &&
			    cl_lock_ext_match(&scan->cll_descr, descr)) {
				struct osc_lock *olck;

				olck = osc_lock_at(scan);
				result = osc_lock_is_lockless(olck);
				break;
			}
		}
		spin_unlock(&hdr->coh_lock_guard);
	}
	return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	return 1;
}
#endif
/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);

	CDEBUG(D_TRACE, "%p\n", opg);
	LASSERT(opg->ops_lock == NULL);
}
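
/*
 * Pin @opg for an ongoing transfer: take an extra cl_page reference and
 * record the pin in ops_transfer_pinned so that osc_page_transfer_put()
 * drops it exactly once.
 */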
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	LASSERT(!opg->ops_transfer_pinned);
	cl_page_get(page);
	lu_ref_add_atomic(&page->cp_reference, label, page);
	opg->ops_transfer_pinned = 1;
}
static void osc_page_transfer_put(const struct lu_env *env,
				  struct osc_page *opg)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	if (opg->ops_transfer_pinned) {
		lu_ref_del(&page->cp_reference, "transfer", page);
		opg->ops_transfer_pinned = 0;
		cl_page_put(env, page);
	}
}
/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
				  struct osc_page *opg, enum cl_req_type crt)
{
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	/* ops_lru and ops_inflight share the same field, so take it from LRU
	 * first and then use it as inflight. */
	osc_lru_del(osc_cli(obj), opg, false);

	spin_lock(&obj->oo_seatbelt);
	list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
	opg->ops_submitter = current;
	spin_unlock(&obj->oo_seatbelt);
}
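
/* Note: the oo_seatbelt spinlock and the per-request-type oo_inflight[]
 * lists above exist so that in-flight pages can be tracked per object;
 * the entry is removed again in osc_page_delete(). */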
static int osc_page_cache_add(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *io)
{
	struct osc_io   *oio = osc_env_io(env);
	struct osc_page *opg = cl2osc_page(slice);
	int result;
	ENTRY;

	LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

	osc_page_transfer_get(opg, "transfer\0cache");
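	/* note: the "transfer\0cache" literal embeds a NUL on purpose: the
	 * lu_ref label compares as "transfer" (matching the lu_ref_del() in
	 * osc_page_transfer_put()), while the unique literal still
	 * identifies this call site. */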
	result = osc_queue_async_io(env, io, opg);
	if (result != 0)
		osc_page_transfer_put(env, opg);
	else
		osc_page_transfer_add(env, opg, CRT_WRITE);

	/* for sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it's known there are no further pages. */
	if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
		if (oio->oi_active != NULL) {
			osc_extent_release(env, oio->oi_active);
			oio->oi_active = NULL;
		}
	}

	RETURN(result);
}
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
		      pgoff_t start, pgoff_t end)
{
	memset(policy, 0, sizeof *policy);
	policy->l_extent.start = cl_offset(obj, start);
	policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}
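
/* For example (assuming 4KB pages), osc_index2policy(&p, obj, 2, 3) yields
 * the inclusive byte extent [8192, 16383] covering pages 2 and 3. */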
static int osc_page_addref_lock(const struct lu_env *env,
				struct osc_page *opg,
				struct cl_lock *lock)
{
	struct osc_lock *olock;
	int		 rc;

	LASSERT(opg->ops_lock == NULL);

	olock = osc_lock_at(lock);
	if (atomic_inc_return(&olock->ols_pageref) <= 0) {
		atomic_dec(&olock->ols_pageref);
		rc = -ENODATA;
	} else {
		cl_lock_get(lock);
		opg->ops_lock = lock;
		rc = 0;
	}
	return rc;
}
static void osc_page_putref_lock(const struct lu_env *env,
				 struct osc_page *opg)
{
	struct cl_lock	*lock = opg->ops_lock;
	struct osc_lock *olock;

	LASSERT(lock != NULL);
	olock = osc_lock_at(lock);

	atomic_dec(&olock->ols_pageref);
	opg->ops_lock = NULL;

	cl_lock_put(env, lock);
}
static int osc_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused)
{
	struct cl_lock *lock;
	int		result = -ENODATA;
	ENTRY;

	lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
			       NULL, 1, 0);
	if (lock != NULL) {
		if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
			result = -EBUSY;
		cl_lock_put(env, lock);
	}
	RETURN(result);
}
static void osc_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);

	if (unlikely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
}
static void osc_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	if (likely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
	osc_lru_add(osc_cli(obj), opg);
}
static void osc_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(slice->cpl_obj);

	osc_lru_add(osc_cli(obj), opg);
}
static int osc_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 struct cl_io *unused)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}
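
/* osc_page_fail() is installed as the read-side cpo_cache_add handler in
 * osc_page_ops below: pages are only ever cached for write-back, so
 * reaching it indicates a logic error, hence the unconditional LBUG(). */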
static const char *osc_list(struct list_head *head)
{
	return list_empty(head) ? "-" : "+";
}
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
	if (opg->ops_submit_time == 0)
		return 0;

	return (cfs_time_current() - opg->ops_submit_time);
}
static int osc_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct osc_page       *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object     *obj = cl2osc(slice->cpl_obj);
	struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;

	return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
			  "1< %#x %d %u %s %s > "
			  "2< "LPU64" %u %u %#x %#x | %p %p %p > "
			  "3< %s %p %d %lu %d > "
			  "4< %d %d %d %lu %s | %s %s %s %s > "
			  "5< %s %s %s %s | %d %s | %d %s %s>\n",
			  opg,
			  /* 1 */
			  oap->oap_magic, oap->oap_cmd,
			  oap->oap_interrupted,
			  osc_list(&oap->oap_pending_item),
			  osc_list(&oap->oap_rpc_item),
			  /* 2 */
			  oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
			  oap->oap_async_flags, oap->oap_brw_flags,
			  oap->oap_request, oap->oap_cli, obj,
			  /* 3 */
			  osc_list(&opg->ops_inflight),
			  opg->ops_submitter, opg->ops_transfer_pinned,
			  osc_submit_duration(opg), opg->ops_srvlock,
			  /* 4 */
			  cli->cl_r_in_flight, cli->cl_w_in_flight,
			  cli->cl_max_rpcs_in_flight,
			  cli->cl_avail_grant,
			  osc_list(&cli->cl_cache_waiters),
			  osc_list(&cli->cl_loi_ready_list),
			  osc_list(&cli->cl_loi_hp_ready_list),
			  osc_list(&cli->cl_loi_write_list),
			  osc_list(&cli->cl_loi_read_list),
			  /* 5 */
			  osc_list(&obj->oo_ready_item),
			  osc_list(&obj->oo_hp_ready_item),
			  osc_list(&obj->oo_write_item),
			  osc_list(&obj->oo_read_item),
			  atomic_read(&obj->oo_nr_reads),
			  osc_list(&obj->oo_reading_exts),
			  atomic_read(&obj->oo_nr_writes),
			  osc_list(&obj->oo_hp_exts),
			  osc_list(&obj->oo_urgent_exts));
}
static void osc_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	int rc;

	LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

	ENTRY;
	CDEBUG(D_TRACE, "%p\n", opg);
	osc_page_transfer_put(env, opg);
	rc = osc_teardown_async_page(env, obj, opg);
	if (rc) {
		CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
			      "Trying to teardown failed: %d\n", rc);
		LASSERT(0);
	}

	spin_lock(&obj->oo_seatbelt);
	if (opg->ops_submitter != NULL) {
		LASSERT(!list_empty(&opg->ops_inflight));
		list_del_init(&opg->ops_inflight);
		opg->ops_submitter = NULL;
	}
	spin_unlock(&obj->oo_seatbelt);

	osc_lru_del(osc_cli(obj), opg, true);
	EXIT;
}
static void osc_page_clip(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  int from, int to)
{
	struct osc_page       *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	opg->ops_from = from;
	opg->ops_to   = to;
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags |= ASYNC_COUNT_STABLE;
	spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
			   const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc = 0;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	/* Check if the transfer of this page has completed, or was never
	 * queued in the first place. */
	if (opg->ops_transfer_pinned)
		/* FIXME: may not be interrupted.. */
		rc = osc_cancel_async_page(env, opg);
	LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
	return rc;
}
static int osc_page_flush(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc;
	ENTRY;
	rc = osc_flush_async_page(env, io, opg);
	RETURN(rc);
}
static const struct cl_page_operations osc_page_ops = {
	.cpo_fini	   = osc_page_fini,
	.cpo_print	   = osc_page_print,
	.cpo_delete	   = osc_page_delete,
	.cpo_is_under_lock = osc_page_is_under_lock,
	.cpo_disown	   = osc_page_disown,
	.io = {
		[CRT_READ] = {
			.cpo_cache_add	= osc_page_fail,
			.cpo_completion = osc_page_completion_read
		},
		[CRT_WRITE] = {
			.cpo_cache_add	= osc_page_cache_add,
			.cpo_completion = osc_page_completion_write
		}
	},
	.cpo_clip	   = osc_page_clip,
	.cpo_cancel	   = osc_page_cancel,
	.cpo_flush	   = osc_page_flush
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct osc_object *osc = cl2osc(obj);
	struct osc_page   *opg = cl_object_page_slice(obj, page);
	int result;

	opg->ops_from = 0;
	opg->ops_to   = PAGE_CACHE_SIZE;

	result = osc_prep_async_page(osc, opg, vmpage,
				     cl_offset(obj, page->cp_index));
	if (result == 0) {
		struct osc_io *oio = osc_env_io(env);

		opg->ops_srvlock = osc_io_srvlock(oio);
		cl_page_slice_add(page, &opg->ops_cl, obj,
				  &osc_page_ops);
	}
	/*
	 * Cannot assert osc_page_protected() here as read-ahead
	 * creates temporary pages outside of a lock.
	 */
	/* ops_inflight and ops_lru are the same field, but it doesn't
	 * hurt to initialize it twice :-) */
	INIT_LIST_HEAD(&opg->ops_inflight);
	INIT_LIST_HEAD(&opg->ops_lru);

	/* reserve an LRU slot for this page */
	if (page->cp_type == CPT_CACHEABLE && result == 0)
		result = osc_lru_reserve(env, osc, opg);

	return result;
}
/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags)
{
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object     *obj = oap->oap_obj;

	LINVRNT(osc_page_protected(env, opg,
				   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

	LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
		 "magic 0x%x\n", oap, oap->oap_magic);
	LASSERT(oap->oap_async_flags & ASYNC_READY);
	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

	oap->oap_cmd	   = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	oap->oap_page_off  = opg->ops_from;
	oap->oap_count	   = opg->ops_to - opg->ops_from;
	oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
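
	/* A local client with CAP_SYS_RESOURCE is allowed to bypass quota
	 * enforcement below, so a privileged process can always flush its
	 * dirty pages. */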
	if (!client_is_remote(osc_export(obj)) &&
	    cfs_capable(CFS_CAP_SYS_RESOURCE)) {
		oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
		oap->oap_cmd |= OBD_BRW_NOQUOTA;
	}

	opg->ops_submit_time = cfs_time_current();
	osc_page_transfer_get(opg, "transfer\0imm");
	osc_page_transfer_add(env, opg, crt);
}
/* --------------- LRU page management ------------------ */

/* OSC is a natural place to manage LRU pages, as application I/O goes to
 * the OSTs OSC by OSC. Ideally, an OSC that is used more frequently should
 * occupy more LRU slots. On the other hand, we should avoid using up all
 * LRU slots (client_obd::cl_lru_left), otherwise a process has to sleep
 * waiting for a free slot - this would be very bad - so the algorithm
 * requires each OSC to free slots voluntarily in order to maintain a
 * reasonable number of free slots at any time.
 */
static CFS_DECL_WAITQ(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU budget, and... */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
/* ...free at most this number, otherwise shrinking would take too long
 * to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
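
/* For example, with 4KB pages (PAGE_CACHE_SHIFT == 12) these work out to
 * lru_shrink_min = 2 << 8 = 512 pages (2MB) and
 * lru_shrink_max = 32 << 8 = 8192 pages (32MB). */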
/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way, slots are freed at a
 * steady pace to maintain fairness among OSCs.
 *
 * Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int pages = atomic_read(&cli->cl_lru_in_list) >> 1;

	if (atomic_read(&osc_lru_waiters) > 0 &&
	    atomic_read(cli->cl_lru_left) < lru_shrink_max)
		/* drop lru pages aggressively */
		return min(pages, lru_shrink_max);

	/* if it's about to run out of LRU slots, we should free some, but
	 * not too many, to maintain fairness among OSCs. */
	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
		unsigned long tmp;

		tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
		if (pages > tmp)
			return min(pages, lru_shrink_max);

		return pages > lru_shrink_min ? lru_shrink_min : 0;
	}

	return 0;
}
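
/* Worked example (illustrative numbers): with ccc_lru_max = 65536 slots
 * shared by 8 OSCs (ccc_users), reclaim kicks in once cl_lru_left drops
 * below 65536 >> 4 = 4096 slots. An OSC whose half-of-in-list count
 * exceeds its 65536 / 8 = 8192 fair share drops that half (capped at
 * lru_shrink_max); otherwise it frees at most one lru_shrink_min batch. */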
/* Return how many pages are not discarded in @pvec. */
static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
			   struct cl_page **pvec, int max_index)
{
	int count;
	int i;

	for (count = 0, i = 0; i < max_index; i++) {
		struct cl_page *page = pvec[i];

		if (cl_page_own_try(env, io, page) == 0) {
			/* free LRU page only if nobody is using it.
			 * This check is necessary to avoid freeing the pages
			 * having already been removed from LRU and pinned
			 * for IO. */
			if (!cl_page_in_use(page)) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				++count;
			}
			cl_page_disown(env, io, page);
		}
		cl_page_put(env, page);
	}
	return max_index - count;
}
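
/*
 * The shrinker below works in batches: it scans at most 2 * target pages
 * from the head of cl_lru_list, skips pages that are still in use,
 * collects victims into the per-thread oti_pvec array, and calls
 * discard_pagevec() on each full batch with cl_lru_list_lock dropped.
 * A new cl_io is (re)initialized whenever the scan crosses into a
 * different cl_object.
 */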
/**
 * Drop at most @target pages from the LRU.
 */
int osc_lru_shrink(struct client_obd *cli, int target)
{
	struct cl_env_nest nest;
	struct lu_env	  *env;
	struct cl_io	  *io;
	struct cl_object  *clobj = NULL;
	struct cl_page	 **pvec;
	struct osc_page   *opg;
	int maxscan = 0;
	int count = 0;
	int index = 0;
	int rc = 0;
	ENTRY;

	LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
	if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
		RETURN(0);

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	pvec = osc_env_info(env)->oti_pvec;
	io = &osc_env_info(env)->oti_io;

	client_obd_list_lock(&cli->cl_lru_list_lock);
	atomic_inc(&cli->cl_lru_shrinkers);
	maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
	while (!list_empty(&cli->cl_lru_list)) {
		struct cl_page *page;

		if (--maxscan < 0)
			break;

		opg = list_entry(cli->cl_lru_list.next, struct osc_page,
				 ops_lru);
		page = cl_page_top(opg->ops_cl.cpl_page);
		if (cl_page_in_use_noref(page)) {
			list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
			continue;
		}

		LASSERT(page->cp_obj != NULL);
		if (clobj != page->cp_obj) {
			struct cl_object *tmp = page->cp_obj;

			cl_object_get(tmp);
			client_obd_list_unlock(&cli->cl_lru_list_lock);

			if (clobj != NULL) {
				count -= discard_pagevec(env, io, pvec, index);
				index = 0;

				cl_io_fini(env, io);
				cl_object_put(env, clobj);
				clobj = NULL;
			}

			clobj = tmp;
			io->ci_obj = clobj;
			io->ci_ignore_layout = 1;
			rc = cl_io_init(env, io, CIT_MISC, clobj);

			client_obd_list_lock(&cli->cl_lru_list_lock);

			if (rc != 0)
				break;

			++maxscan;
			continue;
		}

		/* move this page to the end of list as it will be discarded
		 * soon. The page will be finally removed from LRU list in
		 * osc_page_delete(). */
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);

		/* it's okay to grab a refcount here w/o holding lock because
		 * it has to grab cl_lru_list_lock to delete the page. */
		cl_page_get(page);
		pvec[index++] = page;
		if (++count >= target)
			break;

		if (unlikely(index == OTI_PVEC_SIZE)) {
			client_obd_list_unlock(&cli->cl_lru_list_lock);
			count -= discard_pagevec(env, io, pvec, index);
			index = 0;

			client_obd_list_lock(&cli->cl_lru_list_lock);
		}
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (clobj != NULL) {
		count -= discard_pagevec(env, io, pvec, index);

		cl_io_fini(env, io);
		cl_object_put(env, clobj);
	}
	cl_env_nested_put(&nest, env);

	atomic_dec(&cli->cl_lru_shrinkers);
	RETURN(count > 0 ? count : rc);
}
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
{
	bool wakeup = false;

	if (!opg->ops_in_lru)
		return;

	atomic_dec(&cli->cl_lru_busy);
	client_obd_list_lock(&cli->cl_lru_list_lock);
	if (list_empty(&opg->ops_lru)) {
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
		atomic_inc_return(&cli->cl_lru_in_list);
		wakeup = atomic_read(&osc_lru_waiters) > 0;
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (wakeup) {
		osc_lru_shrink(cli, osc_cache_too_much(cli));
		wake_up_all(&osc_lru_waitq);
	}
}
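
/* The "del" flag below distinguishes the two callers: false when the page
 * merely leaves the LRU to become an in-flight page (its slot stays
 * accounted in cl_lru_busy), true when the page is being destroyed and
 * its slot is returned to cl_lru_left. */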
/* Delete a page from the LRU list. A page can be deleted from the LRU list
 * for two reasons: it has been redirtied, or it has been removed from the
 * page cache. */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
	if (opg->ops_in_lru) {
		client_obd_list_lock(&cli->cl_lru_list_lock);
		if (!list_empty(&opg->ops_lru)) {
			LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
			list_del_init(&opg->ops_lru);
			atomic_dec(&cli->cl_lru_in_list);
			if (!del)
				atomic_inc(&cli->cl_lru_busy);
		} else if (del) {
			LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
			atomic_dec(&cli->cl_lru_busy);
		}
		client_obd_list_unlock(&cli->cl_lru_list_lock);
		if (del) {
			atomic_inc(cli->cl_lru_left);
			/* this is a great place to release more LRU pages if
			 * this osc occupies too many LRU pages and the kernel
			 * is stealing one of them.
			 * cl_lru_shrinkers is to avoid recursive call in case
			 * we're already in the context of osc_lru_shrink(). */
			if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
			    !memory_pressure_get())
				osc_lru_shrink(cli, osc_cache_too_much(cli));
			wake_up(&osc_lru_waitq);
		}
	} else {
		LASSERT(list_empty(&opg->ops_lru));
	}
}
static inline int max_to_shrink(struct client_obd *cli)
{
	return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
static int osc_lru_reclaim(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int max_scans;
	int rc;
	ENTRY;

	LASSERT(cache != NULL);
	LASSERT(!list_empty(&cache->ccc_lru));

	rc = osc_lru_shrink(cli, lru_shrink_min);
	if (rc != 0) {
		CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
		       cli->cl_import->imp_obd->obd_name, rc, cli);
		RETURN(rc);
	}

	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
	       cli->cl_import->imp_obd->obd_name, cli,
	       atomic_read(&cli->cl_lru_in_list),
	       atomic_read(&cli->cl_lru_busy));

	/* Reclaim LRU slots from other client_obd as it can't free enough
	 * from its own. This should rarely happen. */
	spin_lock(&cache->ccc_lru_lock);
	cache->ccc_lru_shrinkers++;
	list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);

	max_scans = atomic_read(&cache->ccc_users);
	while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
		cli = list_entry(cache->ccc_lru.next, struct client_obd,
				 cl_lru_osc);

		CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
		       cli->cl_import->imp_obd->obd_name, cli,
		       atomic_read(&cli->cl_lru_in_list),
		       atomic_read(&cli->cl_lru_busy));

		list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
		if (atomic_read(&cli->cl_lru_in_list) > 0) {
			spin_unlock(&cache->ccc_lru_lock);

			rc = osc_lru_shrink(cli, max_to_shrink(cli));
			spin_lock(&cache->ccc_lru_lock);
			if (rc != 0)
				break;
		}
	}
	spin_unlock(&cache->ccc_lru_lock);

	CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
	       cli->cl_import->imp_obd->obd_name, cli, rc);
	RETURN(rc);
}
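
/* Note the list_move_tail() rotation in osc_lru_reclaim() above: every
 * visited client_obd is moved to the tail of ccc_lru, so successive
 * reclaim passes spread the shrinking round-robin across all OSCs that
 * share the client cache. */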
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	struct client_obd *cli = osc_cli(obj);
	int rc = 0;
	ENTRY;

	if (cli->cl_cache == NULL) /* shall not be in LRU */
		RETURN(0);

	LASSERT(atomic_read(cli->cl_lru_left) >= 0);
	while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
		int gen;

		/* ran out of LRU slots, try to free some by ourselves */
		rc = osc_lru_reclaim(cli);
		if (rc < 0)
			break;
		if (rc > 0)
			continue;

		cond_resched();

		/* slowest case: all cached pages are busy, notify other
		 * OSCs that we're short of LRU slots. */
		atomic_inc(&osc_lru_waiters);

		gen = atomic_read(&cli->cl_lru_in_list);
		rc = l_wait_event(osc_lru_waitq,
				  atomic_read(cli->cl_lru_left) > 0 ||
				  (atomic_read(&cli->cl_lru_in_list) > 0 &&
				   gen != atomic_read(&cli->cl_lru_in_list)),
				  &lwi);

		atomic_dec(&osc_lru_waiters);
		if (rc < 0)
			break;
	}

	if (rc >= 0) {
		atomic_inc(&cli->cl_lru_busy);
		opg->ops_in_lru = 1;
		rc = 0;
	}

	RETURN(rc);
}