/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
					       struct cl_lock *parent);

static int lov_lock_unuse(const struct lu_env *env,
			  const struct cl_lock_slice *slice);
/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

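/**
 * Returns the environment and IO that should be used for operations on a
 * sub-lock of \a parent described by \a lls. Normally this is the per-stripe
 * sub-IO environment; when the lock's object differs from the object of the
 * current IO, the parent's environment is borrowed instead (see the FIXME
 * below). Returns an ERR_PTR() on failure.
 */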
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
						   struct cl_lock *parent,
						   struct lov_lock_sub *lls)
{
	struct lov_sublock_env *subenv;
	struct lov_io	       *lio = lov_env_io(env);
	struct cl_io	       *io  = lio->lis_cl.cis_io;
	struct lov_io_sub      *sub;

	subenv = &lov_env_session(env)->ls_subenv;

	/*
	 * FIXME: We tend to use the sub-IO's env & io to call the sub-lock
	 * operations because an osc lock sometimes stores control variables
	 * in the thread's IO information (currently only lockless
	 * information). However, if the lock's host (object) is different
	 * from the object of the current IO, we have no way to get the
	 * subenv and subio because they are not initialized at all. As a
	 * temporary fix, in this case, we still borrow the parent's env to
	 * call sub-lock operations.
	 */
	if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
		subenv->lse_env = env;
		subenv->lse_io  = io;
		subenv->lse_sub = NULL;
	} else {
		sub = lov_sub_get(env, lio, lls->sub_stripe);
		if (!IS_ERR(sub)) {
			subenv->lse_env = sub->sub_env;
			subenv->lse_io  = sub->sub_io;
			subenv->lse_sub = sub;
		} else {
			subenv = (void *)sub;
		}
	}
	return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
	if (subenv && subenv->lse_sub)
		lov_sub_put(subenv->lse_sub);
}

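/**
 * Attaches an existing \a sublock to the top-lock \a lck at stripe index
 * \a idx: fills the lls_sub[] slot, links \a link into the sub-lock's list
 * of parents, takes a hold and a user reference on the sub-lock, and
 * propagates the sub-lock description into the top-lock.
 */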
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
			      struct cl_lock *sublock, int idx,
			      struct lov_lock_link *link)
{
	struct lovsub_lock *lsl;
	struct cl_lock	   *parent = lck->lls_cl.cls_lock;
	int		    rc;

	LASSERT(cl_lock_is_mutexed(parent));
	LASSERT(cl_lock_is_mutexed(sublock));

	lsl = cl2sub_lock(sublock);
	/*
	 * check that the sub-lock doesn't have a lock link to this top-lock.
	 */
	LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
	LASSERT(idx < lck->lls_nr);

	lck->lls_sub[idx].sub_lock = lsl;
	lck->lls_nr_filled++;
	LASSERT(lck->lls_nr_filled <= lck->lls_nr);
	list_add_tail(&link->lll_list, &lsl->lss_parents);
	link->lll_idx = idx;
	link->lll_super = lck;
	cl_lock_get(parent);
	lu_ref_add(&parent->cll_reference, "lov-child", sublock);
	lck->lls_sub[idx].sub_flags |= LSF_HELD;
	cl_lock_user_add(env, sublock);

	rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
	LASSERT(rc == 0); /* there is no way this can fail, currently */
}

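/**
 * Creates the sub-lock for stripe \a idx of the top-lock \a lck, together
 * with the lov_lock_link used to adopt it later. Returns the sub-lock (with
 * a hold taken on it) or an ERR_PTR() on failure.
 */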
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
					 const struct cl_io *io,
					 struct lov_lock *lck,
					 int idx, struct lov_lock_link **out)
{
	struct cl_lock	     *sublock;
	struct cl_lock	     *parent;
	struct lov_lock_link *link;

	LASSERT(idx < lck->lls_nr);

	OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, GFP_NOFS);
	if (link != NULL) {
		struct lov_sublock_env *subenv;
		struct lov_lock_sub    *lls;
		struct cl_lock_descr   *descr;

		parent = lck->lls_cl.cls_lock;
		lls    = &lck->lls_sub[idx];
		descr  = &lls->sub_got;

		subenv = lov_sublock_env_get(env, parent, lls);
		if (!IS_ERR(subenv)) {
			/* CAVEAT: Don't try to add a field in lov_lock_sub
			 * to remember the subio. This is because a lock can
			 * be cached, but the same is not true for IO. This
			 * further means a sub-lock might be referenced in
			 * different IO contexts. -jay */

			sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
					       descr, "lov-parent", parent);
			lov_sublock_env_put(subenv);
		} else {
			/* an error occurred */
			sublock = (void *)subenv;
		}

		if (!IS_ERR(sublock))
			*out = link;
		else
			OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
	} else
		sublock = ERR_PTR(-ENOMEM);
	return sublock;
}

static void lov_sublock_unlock(const struct lu_env *env,
			       struct lovsub_lock *lsl,
			       struct cl_lock_closure *closure,
			       struct lov_sublock_env *subenv)
{
	lov_sublock_env_put(subenv);
	lsl->lss_active = NULL;
	cl_lock_disclosure(env, closure);
}

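/**
 * Takes the mutex of the sub-lock described by \a lls by building a closure
 * around it. If the sub-lock is being freed or was cancelled, it is unlinked
 * from the top-lock and CLO_REPEAT is returned so that the caller retries.
 * On success the sub-lock environment is optionally returned through
 * \a lsep.
 */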
static int lov_sublock_lock(const struct lu_env *env,
			    struct lov_lock *lck,
			    struct lov_lock_sub *lls,
			    struct cl_lock_closure *closure,
			    struct lov_sublock_env **lsep)
{
	struct lovsub_lock *sublock;
	struct cl_lock	   *child;
	int		    result = 0;

	LASSERT(list_empty(&closure->clc_list));

	sublock = lls->sub_lock;
	child = sublock->lss_cl.cls_lock;
	result = cl_lock_closure_build(env, child, closure);
	if (result == 0) {
		struct cl_lock *parent = closure->clc_origin;

		LASSERT(cl_lock_is_mutexed(child));
		sublock->lss_active = parent;

		if (unlikely((child->cll_state == CLS_FREEING) ||
			     (child->cll_flags & CLF_CANCELLED))) {
			struct lov_lock_link *link;

			/*
			 * We could race with lock deletion, which temporarily
			 * puts the lock into the freeing state; see bug 19080.
			 */
			LASSERT(!(lls->sub_flags & LSF_HELD));

			link = lov_lock_link_find(env, lck, sublock);
			LASSERT(link != NULL);
			lov_lock_unlink(env, link, sublock);
			lov_sublock_unlock(env, sublock, closure, NULL);
			lck->lls_cancel_race = 1;
			result = CLO_REPEAT;
		} else if (lsep) {
			struct lov_sublock_env *subenv;

			subenv = lov_sublock_env_get(env, parent, lls);
			if (IS_ERR(subenv)) {
				lov_sublock_unlock(env, sublock,
						   closure, NULL);
				result = PTR_ERR(subenv);
			} else {
				*lsep = subenv;
			}
		}
	}
	return result;
}

/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate return value of a
 * top-operation. To this end, possible return values of sub-operations are
 * ordered as
 *
 *     - 0		success
 *     - CLO_WAIT	wait for event
 *     - CLO_REPEAT	repeat top-operation
 *     - -ve		fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
 * has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
	int result_rank;
	int rc_rank;

	LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
		 "result = %d\n", result);
	LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
		 "rc = %d\n", rc);
	CLASSERT(CLO_WAIT < CLO_REPEAT);

	/* calculate ranks in the ordering above */
	result_rank = result < 0 ? 1 + CLO_REPEAT : result;
	rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

	if (result_rank < rc_rank)
		result = rc;
	return result;
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
			     struct lov_lock *lck, const struct cl_io *io)
{
	int result = 0;
	int i;
	int nr;
	u64 start;
	u64 end;
	u64 file_start;
	u64 file_end;

	struct lov_object	*loo    = cl2lov(lck->lls_cl.cls_obj);
	struct lov_layout_raid0	*r0     = lov_r0(loo);
	struct cl_lock		*parent = lck->lls_cl.cls_lock;

	lck->lls_orig = parent->cll_descr;
	file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
	file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

	for (i = 0, nr = 0; i < r0->lo_nr; i++) {
		/*
		 * XXX for wide striping a smarter algorithm, breaking out of
		 * the loop early, is desirable.
		 */
		if (lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end))
			nr++;
	}
	LASSERT(nr > 0);
	OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof(lck->lls_sub[0]));
	if (lck->lls_sub == NULL)
		return -ENOMEM;

	lck->lls_nr = nr;
	/*
	 * First, fill in sub-lock descriptions in
	 * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
	 * (called below in this function, and by lov_lock_enqueue()) to
	 * create sub-locks. At this moment, no other thread can access
	 * top-lock.
	 */
	for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
		if (lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end)) {
			struct cl_lock_descr *descr;

			descr = &lck->lls_sub[nr].sub_descr;

			LASSERT(descr->cld_obj == NULL);
			descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
			descr->cld_start = cl_index(descr->cld_obj, start);
			descr->cld_end   = cl_index(descr->cld_obj, end);
			descr->cld_mode  = parent->cll_descr.cld_mode;
			descr->cld_gid   = parent->cll_descr.cld_gid;
			descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
			/* XXX has no effect */
			lck->lls_sub[nr].sub_got = *descr;
			lck->lls_sub[nr].sub_stripe = i;
			nr++;
		}
	}
	LASSERT(nr == lck->lls_nr);

	/*
	 * Some sub-locks can be missing at this point. This is not a problem,
	 * because enqueue will create them anyway. The main duty of this
	 * function is to fill in sub-lock descriptions in a race-free manner.
	 */
	return result;
}

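/**
 * Releases the hold (and, if \a deluser is set, the user reference) that
 * the top-lock \a lck has on its \a i-th sub-lock. If the last hold of a
 * dying sub-lock is released, the parent mutex is dropped temporarily and
 * CLO_REPEAT is folded into \a rc.
 */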
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
			       int i, int deluser, int rc)
{
	struct cl_lock *parent = lck->lls_cl.cls_lock;

	LASSERT(cl_lock_is_mutexed(parent));

	if (lck->lls_sub[i].sub_flags & LSF_HELD) {
		struct cl_lock *sublock;
		int dying;

		LASSERT(lck->lls_sub[i].sub_lock != NULL);
		sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
		LASSERT(cl_lock_is_mutexed(sublock));

		lck->lls_sub[i].sub_flags &= ~LSF_HELD;
		if (deluser)
			cl_lock_user_del(env, sublock);
		/*
		 * If the last hold is released, and cancellation is pending
		 * for a sub-lock, release parent mutex, to avoid keeping it
		 * while sub-lock is being paged out.
		 */
		dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
			 sublock->cll_descr.cld_mode == CLM_GROUP ||
			 (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
			sublock->cll_holds == 1;
		if (dying)
			cl_lock_mutex_put(env, parent);
		cl_lock_unhold(env, sublock, "lov-parent", parent);
		if (dying) {
			cl_lock_mutex_get(env, parent);
			rc = lov_subresult(rc, CLO_REPEAT);
		}
		/*
		 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
		 * not backed by a reference on a
		 * sub-lock. lovsub_lock_delete() will clear
		 * lck->lls_sub[i].sub_lock under semaphores, just before
		 * sub-lock is destroyed.
		 */
	}
	return rc;
}

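/**
 * Takes a hold and a user reference on the \a i-th sub-lock on behalf of the
 * top-lock \a lck, unless the top-lock already holds it (LSF_HELD).
 */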
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
			     int i)
{
	struct cl_lock *parent = lck->lls_cl.cls_lock;

	LASSERT(cl_lock_is_mutexed(parent));

	if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
		struct cl_lock *sublock;

		LASSERT(lck->lls_sub[i].sub_lock != NULL);
		sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
		LASSERT(cl_lock_is_mutexed(sublock));
		LASSERT(sublock->cll_state != CLS_FREEING);

		lck->lls_sub[i].sub_flags |= LSF_HELD;

		cl_lock_get_trust(sublock);
		cl_lock_hold_add(env, sublock, "lov-parent", parent);
		cl_lock_user_add(env, sublock);
		cl_lock_put(env, sublock);
	}
}

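/**
 * An implementation of cl_lock_operations::clo_fini() for the lov layer:
 * frees the sub-lock array and the lov_lock slice itself. All sub-locks must
 * have been unlinked by this point.
 */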
static void lov_lock_fini(const struct lu_env *env,
			  struct cl_lock_slice *slice)
{
	struct lov_lock *lck;
	int i;

	lck = cl2lov_lock(slice);
	LASSERT(lck->lls_nr_filled == 0);
	if (lck->lls_sub != NULL) {
		for (i = 0; i < lck->lls_nr; ++i)
			/*
			 * No sub-locks exist at this point, as a sub-lock
			 * holds a reference on its parent.
			 */
			LASSERT(lck->lls_sub[i].sub_lock == NULL);
		OBD_FREE_LARGE(lck->lls_sub,
			       lck->lls_nr * sizeof(lck->lls_sub[0]));
	}
	OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}

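/**
 * Releases the top-lock mutex, waits for the enqueue of \a sublock to make
 * progress, and re-acquires the mutex. Returns an error, or CLO_REPEAT so
 * that the caller restarts the top-level enqueue.
 */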
static int lov_lock_enqueue_wait(const struct lu_env *env,
				 struct lov_lock *lck,
				 struct cl_lock *sublock)
{
	struct cl_lock *lock = lck->lls_cl.cls_lock;
	int		result;

	LASSERT(cl_lock_is_mutexed(lock));

	cl_lock_mutex_put(env, lock);
	result = cl_lock_enqueue_wait(env, sublock, 0);
	cl_lock_mutex_get(env, lock);
	return result ?: CLO_REPEAT;
}

/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
				struct cl_lock *sublock,
				struct cl_io *io, __u32 enqflags, int last)
{
	int result;

	/* first, try to enqueue a sub-lock ... */
	result = cl_enqueue_try(env, sublock, io, enqflags);
	if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
		/* if it is enqueued, try to `wait' on it, as it may already
		 * be granted */
		result = cl_wait_try(env, sublock);
		if (result == CLO_REENQUEUED)
			result = CLO_WAIT;
	}
	/*
	 * If the CEF_ASYNC flag is set, then all sub-locks can be enqueued in
	 * parallel; otherwise the enqueue has to wait until the sub-lock is
	 * granted before proceeding to the next one.
	 */
	if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
	    (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
		result = 0;
	return result;
}

/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
			    struct cl_io *io, struct lov_lock *lck, int idx)
{
	struct lov_lock_link *link = NULL;
	struct cl_lock	     *sublock;
	int		      result;

	LASSERT(parent->cll_depth == 1);
	cl_lock_mutex_put(env, parent);
	sublock = lov_sublock_alloc(env, io, lck, idx, &link);
	if (!IS_ERR(sublock))
		cl_lock_mutex_get(env, sublock);
	cl_lock_mutex_get(env, parent);

	if (!IS_ERR(sublock)) {
		cl_lock_get_trust(sublock);
		if (parent->cll_state == CLS_QUEUING &&
		    lck->lls_sub[idx].sub_lock == NULL) {
			lov_sublock_adopt(env, lck, sublock, idx, link);
		} else {
			OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
			/* another thread allocated the sub-lock, or the
			 * enqueue is no longer going on */
			cl_lock_mutex_put(env, parent);
			cl_lock_unhold(env, sublock, "lov-parent", parent);
			cl_lock_mutex_get(env, parent);
		}
		cl_lock_mutex_put(env, sublock);
		cl_lock_put(env, sublock);
		result = CLO_REPEAT;
	} else
		result = PTR_ERR(sublock);
	return result;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-locks sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
			    const struct cl_lock_slice *slice,
			    struct cl_io *io, __u32 enqflags)
{
	struct cl_lock	       *lock    = slice->cls_lock;
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, lock);
	int i;
	int result;
	enum cl_lock_state minstate;

	for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
		int rc;
		struct lovsub_lock     *sub;
		struct lov_lock_sub    *lls;
		struct cl_lock	       *sublock;
		struct lov_sublock_env *subenv;

		if (lock->cll_state != CLS_QUEUING) {
			/*
			 * Lock might have left QUEUING state if previous
			 * iteration released its mutex. Stop enqueuing in this
			 * case and let the upper layer decide what to do.
			 */
			LASSERT(i > 0 && result != 0);
			break;
		}

		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		/*
		 * Sub-lock might have been canceled, while top-lock was
		 * cached.
		 */
		if (sub == NULL) {
			result = lov_sublock_fill(env, lock, io, lck, i);
			/* lov_sublock_fill() released @lock mutex,
			 * restart. */
			break;
		}
		sublock = sub->lss_cl.cls_lock;
		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
		if (rc == 0) {
			lov_sublock_hold(env, lck, i);
			rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
						  subenv->lse_io, enqflags,
						  i == lck->lls_nr - 1);
			minstate = min(minstate, sublock->cll_state);
			if (rc == CLO_WAIT) {
				switch (sublock->cll_state) {
				case CLS_QUEUING:
					/* take recursive mutex, the lock is
					 * released in lov_lock_enqueue_wait.
					 */
					cl_lock_mutex_get(env, sublock);
					lov_sublock_unlock(env, sub, closure,
							   subenv);
					rc = lov_lock_enqueue_wait(env, lck,
								   sublock);
					break;
				case CLS_CACHED:
					cl_lock_get(sublock);
					/* take recursive mutex of sublock */
					cl_lock_mutex_get(env, sublock);
					/* need to release all locks in closure
					 * otherwise it may deadlock. LU-2683. */
					lov_sublock_unlock(env, sub, closure,
							   subenv);
					/* sublock and parent are held. */
					rc = lov_sublock_release(env, lck, i,
								 1, rc);
					cl_lock_mutex_put(env, sublock);
					cl_lock_put(env, sublock);
					break;
				default:
					lov_sublock_unlock(env, sub, closure,
							   subenv);
					break;
				}
			} else {
				LASSERT(sublock->cll_conflict == NULL);
				lov_sublock_unlock(env, sub, closure, subenv);
			}
		}
		result = lov_subresult(result, rc);
		if (result != 0)
			break;
	}
	cl_lock_closure_fini(closure);
	return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
}

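/**
 * An implementation of cl_lock_operations::clo_unuse() for the lov layer:
 * drops the user reference and the hold on every sub-lock that the top-lock
 * currently holds.
 */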
static int lov_lock_unuse(const struct lu_env *env,
			  const struct cl_lock_slice *slice)
{
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
	int i;
	int result;

	for (result = 0, i = 0; i < lck->lls_nr; ++i) {
		int rc;
		struct lovsub_lock     *sub;
		struct cl_lock	       *sublock;
		struct lov_lock_sub    *lls;
		struct lov_sublock_env *subenv;

		/* top-lock state cannot change concurrently, because a single
		 * thread (the one that released the last hold) carries the
		 * unlocking through to completion. */
		LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		if (sub == NULL)
			continue;

		sublock = sub->lss_cl.cls_lock;
		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
		if (rc == 0) {
			if (lls->sub_flags & LSF_HELD) {
				LASSERT(sublock->cll_state == CLS_HELD ||
					sublock->cll_state == CLS_ENQUEUED);
				rc = cl_unuse_try(subenv->lse_env, sublock);
				rc = lov_sublock_release(env, lck, i, 0, rc);
			}
			lov_sublock_unlock(env, sub, closure, subenv);
		}
		result = lov_subresult(result, rc);
	}

	if (result == 0 && lck->lls_cancel_race) {
		lck->lls_cancel_race = 0;
		result = -ESTALE;
	}
	cl_lock_closure_fini(closure);
	return result;
}

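/**
 * An implementation of cl_lock_operations::clo_cancel() for the lov layer:
 * unuses and releases every held sub-lock of the top-lock.
 */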
static void lov_lock_cancel(const struct lu_env *env,
			    const struct cl_lock_slice *slice)
{
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
	int i;
	int result;

	for (result = 0, i = 0; i < lck->lls_nr; ++i) {
		int rc;
		struct lovsub_lock     *sub;
		struct cl_lock	       *sublock;
		struct lov_lock_sub    *lls;
		struct lov_sublock_env *subenv;

		/* top-lock state cannot change concurrently, because a single
		 * thread (the one that released the last hold) carries the
		 * unlocking through to completion. */
		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		if (sub == NULL)
			continue;

		sublock = sub->lss_cl.cls_lock;
		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
		if (rc == 0) {
			if (!(lls->sub_flags & LSF_HELD)) {
				lov_sublock_unlock(env, sub, closure, subenv);
				continue;
			}

			switch (sublock->cll_state) {
			case CLS_HELD:
				rc = cl_unuse_try(subenv->lse_env, sublock);
				lov_sublock_release(env, lck, i, 0, 0);
				break;
			default:
				lov_sublock_release(env, lck, i, 1, 0);
				break;
			}
			lov_sublock_unlock(env, sub, closure, subenv);
		}

		if (rc == CLO_REPEAT) {
			--i;
			continue;
		}

		result = lov_subresult(result, rc);
	}

	if (result)
		CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
			      "lov_lock_cancel fails with %d.\n", result);

	cl_lock_closure_fini(closure);
}

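/**
 * An implementation of cl_lock_operations::clo_wait() for the lov layer:
 * waits until every sub-lock reaches at least the CLS_HELD state, restarting
 * the scan if any sub-lock was re-enqueued.
 */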
static int lov_lock_wait(const struct lu_env *env,
			 const struct cl_lock_slice *slice)
{
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
	enum cl_lock_state	minstate;
	int			reenqueued;
	int			result;
	int			i;

again:
	for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
	     i < lck->lls_nr; ++i) {
		int rc;
		struct lovsub_lock     *sub;
		struct cl_lock	       *sublock;
		struct lov_lock_sub    *lls;
		struct lov_sublock_env *subenv;

		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		LASSERT(sub != NULL);
		sublock = sub->lss_cl.cls_lock;
		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
		if (rc == 0) {
			LASSERT(sublock->cll_state >= CLS_ENQUEUED);
			if (sublock->cll_state < CLS_HELD)
				rc = cl_wait_try(env, sublock);

			minstate = min(minstate, sublock->cll_state);
			lov_sublock_unlock(env, sub, closure, subenv);
		}
		if (rc == CLO_REENQUEUED) {
			reenqueued++;
			rc = 0;
		}
		result = lov_subresult(result, rc);
		if (result != 0)
			break;
	}
	/* Each sub-lock can only be re-enqueued once, so this will not loop
	 * forever. */
	if (result == 0 && reenqueued != 0)
		goto again;
	cl_lock_closure_fini(closure);
	return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
}

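/**
 * An implementation of cl_lock_operations::clo_use() for the lov layer:
 * brings every cached sub-lock back into use. Returns -ESTALE if a sub-lock
 * was cancelled while the top-lock was cached.
 */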
static int lov_lock_use(const struct lu_env *env,
			const struct cl_lock_slice *slice)
{
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
	int			result;
	int			i;

	LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

	for (result = 0, i = 0; i < lck->lls_nr; ++i) {
		int rc;
		struct lovsub_lock     *sub;
		struct cl_lock	       *sublock;
		struct lov_lock_sub    *lls;
		struct lov_sublock_env *subenv;

		LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		if (sub == NULL) {
			/*
			 * Sub-lock might have been canceled, while top-lock
			 * was cached.
			 */
			result = -ESTALE;
			break;
		}

		sublock = sub->lss_cl.cls_lock;
		rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
		if (rc == 0) {
			LASSERT(sublock->cll_state != CLS_FREEING);
			lov_sublock_hold(env, lck, i);
			if (sublock->cll_state == CLS_CACHED) {
				rc = cl_use_try(subenv->lse_env, sublock, 0);
				if (rc != 0)
					rc = lov_sublock_release(env, lck,
								 i, 1, rc);
			} else if (sublock->cll_state == CLS_NEW) {
				/* Sub-lock might have been canceled, while
				 * top-lock was cached. */
				result = -ESTALE;
				lov_sublock_release(env, lck, i, 1, result);
			}
			lov_sublock_unlock(env, sub, closure, subenv);
		}
		result = lov_subresult(result, rc);
		if (result != 0)
			break;
	}

	if (lck->lls_cancel_race) {
		/*
		 * If unlocking happened at the same time, the sub-lock state
		 * should be FREEING, and lov_sublock_lock() should return
		 * CLO_REPEAT. In this case, return -ESTALE, and the upper
		 * layer should reset the lock state to NEW.
		 */
		lck->lls_cancel_race = 0;
		LASSERT(result != 0);
		result = -ESTALE;
	}
	cl_lock_closure_fini(closure);
	return result;
}

#if 0
static int lock_lock_multi_match()
{
	struct cl_lock		*lock    = slice->cls_lock;
	struct cl_lock_descr	*subneed = &lov_env_info(env)->lti_ldescr;
	struct lov_object	*loo     = cl2lov(lov->lls_cl.cls_obj);
	struct lov_layout_raid0	*r0      = lov_r0(loo);
	struct lov_lock_sub	*sub;
	struct cl_object	*subobj;
	u64 fstart;
	u64 fend;
	u64 start;
	u64 end;
	int i;

	fstart = cl_offset(need->cld_obj, need->cld_start);
	fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
	subneed->cld_mode = need->cld_mode;
	cl_lock_mutex_get(env, lock);
	for (i = 0; i < lov->lls_nr; ++i) {
		sub = &lov->lls_sub[i];
		if (sub->sub_lock == NULL)
			continue;
		subobj = sub->sub_descr.cld_obj;
		if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
					   fstart, fend, &start, &end))
			continue;
		subneed->cld_start = cl_index(subobj, start);
		subneed->cld_end   = cl_index(subobj, end);
		subneed->cld_obj   = subobj;
		if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
			result = 0;
			break;
		}
	}
	cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
				       struct lov_object *lov, int stripe,
				       const struct cl_lock_descr *child,
				       const struct cl_lock_descr *descr)
{
	struct lov_stripe_md *lsm = lov->lo_lsm;
	u64 start;
	u64 end;
	int result;

	if (lov_r0(lov)->lo_nr == 1)
		return cl_lock_ext_match(child, descr);

	/*
	 * For a multi-stripe object:
	 * - make sure the descr only covers the child's stripe, and
	 * - check if the extent is matching.
	 */
	start = cl_offset(&lov->lo_cl, descr->cld_start);
	end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
	result = end - start <= lsm->lsm_stripe_size &&
		 stripe == lov_stripe_number(lsm, start) &&
		 stripe == lov_stripe_number(lsm, end);
	if (result) {
		struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
		u64 sub_start;
		u64 sub_end;

		subd->cld_obj  = NULL;   /* don't need sub object at all */
		subd->cld_mode = descr->cld_mode;
		subd->cld_gid  = descr->cld_gid;
		result = lov_stripe_intersects(lsm, stripe, start, end,
					       &sub_start, &sub_end);
		LASSERT(result);
		subd->cld_start = cl_index(child->cld_obj, sub_start);
		subd->cld_end   = cl_index(child->cld_obj, sub_end);
		result = cl_lock_ext_match(child, subd);
	}
	return result;
}

/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
			      const struct cl_lock_slice *slice,
			      const struct cl_lock_descr *need,
			      const struct cl_io *io)
{
	struct lov_lock   *lov = cl2lov_lock(slice);
	struct lov_object *obj = cl2lov(slice->cls_obj);
	int result;

	LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
	LASSERT(lov->lls_nr > 0);

	/* For the top-lock it is necessary to match the enqueue flags,
	 * otherwise it will run into problems if a sub-lock is missing and
	 * has to be re-enqueued. */
	if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
		return 0;

	if (need->cld_mode == CLM_GROUP)
		/*
		 * always allow matching of group locks.
		 */
		result = cl_lock_ext_match(&lov->lls_orig, need);
	else if (lov->lls_nr == 1) {
		struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;

		result = lov_lock_stripe_is_matching(env,
						     cl2lov(slice->cls_obj),
						     lov->lls_sub[0].sub_stripe,
						     got, need);
	} else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
		   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
		/*
		 * Multi-stripe locks are only suitable for `quick' IO and for
		 * glimpse.
		 */
		result = 0;
	else
		/*
		 * Most general case: multi-stripe existing lock, and
		 * (potentially) multi-stripe @need lock. Check that @need is
		 * covered by @lov's sub-locks.
		 *
		 * For now, ignore lock expansions made by the server, and
		 * match against original lock extent.
		 */
		result = cl_lock_ext_match(&lov->lls_orig, need);
	CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
	       PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
	       lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
	       result);
	return result;
}

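/**
 * Unlinks the sub-lock \a sub from its top-lock through \a link: clears the
 * corresponding lls_sub[] slot, drops the parent reference taken in
 * lov_sublock_adopt() and frees the link.
 */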
void lov_lock_unlink(const struct lu_env *env,
		     struct lov_lock_link *link, struct lovsub_lock *sub)
{
	struct lov_lock *lck    = link->lll_super;
	struct cl_lock  *parent = lck->lls_cl.cls_lock;

	LASSERT(cl_lock_is_mutexed(parent));
	LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

	list_del_init(&link->lll_list);
	LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
	/* yank this sub-lock from parent's array */
	lck->lls_sub[link->lll_idx].sub_lock = NULL;
	LASSERT(lck->lls_nr_filled > 0);
	lck->lls_nr_filled--;
	lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
	cl_lock_put(env, parent);
	OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
}

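/**
 * Finds the link connecting the sub-lock \a sub with the top-lock \a lck by
 * scanning the sub-lock's list of parents. Returns NULL if no such link
 * exists.
 */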
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
					 struct lov_lock *lck,
					 struct lovsub_lock *sub)
{
	struct lov_lock_link *scan;

	LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

	list_for_each_entry(scan, &sub->lss_parents, lll_list) {
		if (scan->lll_super == lck)
			return scan;
	}
	return NULL;
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
 *       each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
			    const struct cl_lock_slice *slice)
{
	struct lov_lock	       *lck     = cl2lov_lock(slice);
	struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
	struct lov_lock_link   *link;
	int			rc;
	int			i;

	LASSERT(slice->cls_lock->cll_state == CLS_FREEING);

	for (i = 0; i < lck->lls_nr; ++i) {
		struct lov_lock_sub *lls = &lck->lls_sub[i];
		struct lovsub_lock  *lsl = lls->sub_lock;

		if (lsl == NULL) /* already removed */
			continue;

		rc = lov_sublock_lock(env, lck, lls, closure, NULL);
		if (rc == CLO_REPEAT) {
			--i;
			continue;
		}

		LASSERT(rc == 0);
		LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);

		if (lls->sub_flags & LSF_HELD)
			lov_sublock_release(env, lck, i, 1, 0);

		link = lov_lock_link_find(env, lck, lsl);
		LASSERT(link != NULL);
		lov_lock_unlink(env, link, lsl);
		LASSERT(lck->lls_sub[i].sub_lock == NULL);

		lov_sublock_unlock(env, lsl, closure, NULL);
	}

	cl_lock_closure_fini(closure);
}

static int lov_lock_print(const struct lu_env *env, void *cookie,
			  lu_printer_t p, const struct cl_lock_slice *slice)
{
	struct lov_lock *lck = cl2lov_lock(slice);
	int		 i;

	(*p)(env, cookie, "%d\n", lck->lls_nr);
	for (i = 0; i < lck->lls_nr; ++i) {
		struct lov_lock_sub *sub;

		sub = &lck->lls_sub[i];
		(*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
		if (sub->sub_lock != NULL)
			cl_lock_print(env, cookie, p,
				      sub->sub_lock->lss_cl.cls_lock);
		else
			(*p)(env, cookie, "---\n");
	}
	return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
	.clo_fini      = lov_lock_fini,
	.clo_enqueue   = lov_lock_enqueue,
	.clo_wait      = lov_lock_wait,
	.clo_use       = lov_lock_use,
	.clo_unuse     = lov_lock_unuse,
	.clo_cancel    = lov_lock_cancel,
	.clo_fits_into = lov_lock_fits_into,
	.clo_delete    = lov_lock_delete,
	.clo_print     = lov_lock_print
};

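/**
 * Allocates the lov_lock slice for a RAID0-striped object, adds it to
 * \a lock and fills in the sub-lock descriptions through
 * lov_lock_sub_init().
 */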
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io)
{
	struct lov_lock *lck;
	int result;

	OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
	if (lck != NULL) {
		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
		result = lov_lock_sub_init(env, lck, io);
	} else
		result = -ENOMEM;
	return result;
}

static void lov_empty_lock_fini(const struct lu_env *env,
				struct cl_lock_slice *slice)
{
	struct lov_lock *lck = cl2lov_lock(slice);

	OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}

static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
				lu_printer_t p,
				const struct cl_lock_slice *slice)
{
	(*p)(env, cookie, "empty\n");
	return 0;
}

/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
	.clo_fini  = lov_empty_lock_fini,
	.clo_print = lov_empty_lock_print
};

int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io)
{
	struct lov_lock *lck;
	int result = -ENOMEM;

	OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
	if (lck != NULL) {
		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
		lck->lls_orig = lock->cll_descr;
		result = 0;
	}
	return result;
}

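/**
 * Initializes the per-thread lock closure stored in lov_thread_info with
 * \a parent as its origin and returns it.
 */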
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
					       struct cl_lock *parent)
{
	struct cl_lock_closure *closure;

	closure = &lov_env_info(env)->lti_closure;
	LASSERT(list_empty(&closure->clc_list));
	cl_lock_closure_init(env, closure, parent, 1);
	return closure;
}

/** @} lov */