/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Definitions shared between vvp and liblustre, and other clients in the
 * future.
 *
 *   Author: Oleg Drokin <oleg.drokin@sun.com>
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LCLIENT_H
#define LCLIENT_H

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}
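
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * caller that needs an up-to-date file size, e.g. on a getattr path,
 * glimpses the size from the servers before trusting i_size:
 *
 *	int rc = cl_glimpse_size(inode);
 *
 *	if (rc == 0)
 *		size = i_size_read(inode);
 */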

/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};

/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice     cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector describing the user buffers to or from which the
	 * read/write transfers data.
	 */
	struct iov_iter *cui_iter;
	/**
	 * Total size of the remaining IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
	} u;
	/**
	 * True iff io is processing glimpse right now.
	 */
	int		     cui_glimpse;
	/**
	 * Layout version at the time this IO was initialized.
	 */
	__u32		     cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb	    *cui_iocb;
};
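
/*
 * Usage sketch (illustrative only): a setattr IO records its locking
 * policy in the per-IO union, roughly as a truncate path might do
 * (ccc_env_io() is declared below; the policy chosen here is an
 * assumption, not mandated by this header):
 *
 *	struct ccc_io *cio = ccc_env_io(env);
 *
 *	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
 */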

/**
 * True if \a io is a normal io, false for splice_{read,write}.
 * Must be implemented in arch specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock_descr cti_descr;
	struct cl_io	     cti_io;
	struct cl_attr	     cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}
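
/*
 * Usage sketch (illustrative only, assuming the cl_io_init()/cl_io_fini()
 * interfaces of the cl_object layer): the per-thread cl_io is fetched from
 * the environment, initialized, used and torn down:
 *
 *	struct cl_io *io = ccc_env_thread_io(env);
 *
 *	io->ci_obj = clob;
 *	if (cl_io_init(env, io, CIT_MISC, clob) == 0) {
 *		... issue the IO against clob ...
 *	}
 *	cl_io_fini(env, io);
 */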

/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header cob_header;
	struct cl_object	cob_cl;
	struct inode	       *cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head	cob_pending_list;

	/**
	 * Access to this counter is protected by inode->i_sem. Since the
	 * lifetime of transient pages must be covered by the inode
	 * semaphore, we don't need to hold any other lock.
	 */
	int			cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t		cob_mmap_cnt;

	/**
	 * Various flags.
	 *
	 * cob_discard_page_warned
	 *	If pages belonging to this object are discarded when a client
	 *	is evicted, some debug info is printed. This flag is set while
	 *	processing the first discarded page, so that the debug message
	 *	is not repeated for every subsequent discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice cpg_cl;
	int		     cpg_defer_uptodate;
	int		     cpg_ra_used;
	int		     cpg_write_queued;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head     cpg_pending_linkage;
	/** VM page */
	struct page	    *cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}

struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);

struct ccc_device {
	struct cl_device    cdv_cl;
	struct super_block *cdv_sb;
	struct cl_device   *cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
		   struct lu_context_key *key);
void  ccc_key_fini(const struct lu_context *ctx,
		   struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void  ccc_session_key_fini(const struct lu_context *ctx,
			   struct lu_context_key *key, void *data);

int ccc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf);
struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice);
int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice, struct cl_io *io);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
void ccc_transient_page_verify(const struct cl_page *page);
int  ccc_transient_page_own(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io, int nonblock);
void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *io);
void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *io);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io);
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state);

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, u64 flags);

struct lu_device  *ccc2lu_dev(struct ccc_device *vdv);
struct lu_object  *ccc2lu(struct ccc_object *vob);
struct ccc_device *lu2ccc_dev(const struct lu_device *d);
struct ccc_device *cl2ccc_dev(const struct cl_device *d);
struct ccc_object *lu2ccc(const struct lu_object *obj);
struct ccc_object *cl2ccc(const struct cl_object *obj);
struct ccc_lock   *cl2ccc_lock(const struct cl_lock_slice *slice);
struct ccc_io	  *cl2ccc_io(const struct lu_env *env,
			     const struct cl_io_slice *slice);
struct ccc_req	  *cl2ccc_req(const struct cl_req_slice *slice);
struct page	  *cl2vm_page(const struct cl_page_slice *slice);
struct inode	  *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc(struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
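
/*
 * With the definition above, CLOBINVRNT() only type-checks its arguments
 * and compiles the invariant expression away; debug builds of the full
 * Lustre tree typically substitute a variant that asserts the expression.
 * Typical use (sketch):
 *
 *	CLOBINVRNT(env, clob, ccc_object_invariant(clob));
 */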

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env  *cg_env;
	struct cl_io   *cg_io;
	struct cl_lock *cg_lock;
	unsigned long	cg_gid;
};

int  cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		      struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);
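
/*
 * Usage sketch (illustrative only): take and release a group lock around
 * group-locked IO; gid is the group identifier supplied by the caller:
 *
 *	struct ccc_grouplock cg;
 *	int rc;
 *
 *	rc = cl_get_grouplock(obj, gid, 0, &cg);
 *	if (rc == 0) {
 *		... do IO under the group lock ...
 *		cl_put_grouplock(&cg);
 *	}
 */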

/**
 * New interfaces to get and put lov_stripe_md from the lov layer. This
 * violates layering, because lov_stripe_md is supposed to be private data
 * of lov.
 *
 * NB: If you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
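
/*
 * Usage sketch (illustrative only): every successful get must be paired
 * with a put once the striping information is no longer needed:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		... inspect the striping ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 */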

/**
 * Data structure managing a client's cached clean pages. An LRU of
 * pages is maintained, along with other statistics.
 */
struct cl_client_cache {
	atomic_t	 ccc_users;	    /* # of users (OSCs) of this data */
	struct list_head ccc_lru;	    /* LRU list of cached clean pages */
	spinlock_t	 ccc_lru_lock;	    /* lock for list */
	atomic_t	 ccc_lru_left;	    /* # of LRU entries available */
	unsigned long	 ccc_lru_max;	    /* Max # of LRU entries possible */
	unsigned int	 ccc_lru_shrinkers; /* # of threads reclaiming */
};
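
/*
 * Initialization sketch (illustrative only; max_pages is a caller-chosen
 * limit, not part of this header): a new cache starts with one user and
 * an empty LRU:
 *
 *	atomic_set(&cache->ccc_users, 1);
 *	INIT_LIST_HEAD(&cache->ccc_lru);
 *	spin_lock_init(&cache->ccc_lru_lock);
 *	atomic_set(&cache->ccc_lru_left, max_pages);
 *	cache->ccc_lru_max = max_pages;
 */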

#endif /* LCLIENT_H */