
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * The vvp_ prefix stands for "Vfs Vm Posix". It corresponds to the
 * historical "llite_" (var. "ll_") prefix.
 */

static struct kmem_cache *vvp_thread_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
	{
		.ckd_cache = &vvp_thread_kmem,
		.ckd_name  = "vvp_thread_kmem",
		.ckd_size  = sizeof(struct vvp_thread_info),
	},
	{
		.ckd_cache = &vvp_session_kmem,
		.ckd_name  = "vvp_session_kmem",
		.ckd_size  = sizeof(struct vvp_session)
	},
	{
		.ckd_cache = NULL
	}
};

static void *vvp_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct vvp_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, GFP_NOFS);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void vvp_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, vvp_thread_kmem);
}

static void *vvp_session_key_init(const struct lu_context *ctx,
				  struct lu_context_key *key)
{
	struct vvp_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
				 struct lu_context_key *key, void *data)
{
	struct vvp_session *session = data;

	OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}

struct lu_context_key vvp_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = vvp_key_init,
	.lct_fini = vvp_key_fini
};

struct lu_context_key vvp_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = vvp_session_key_init,
	.lct_fini = vvp_session_key_fini
};

/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
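
/*
 * The macro above expands to vvp_type_{init,fini,start,stop}(), which are
 * wired into vvp_device_type_ops below; they register (and on teardown
 * deregister) the context keys listed, so each lu_env entering this layer
 * carries the ccc and vvp per-thread and per-session state.
 */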

static const struct lu_device_operations vvp_lu_ops = {
	.ldo_object_alloc      = vvp_object_alloc
};

static const struct cl_device_operations vvp_cl_ops = {
	.cdo_req_init = ccc_req_init
};

static struct lu_device *vvp_device_alloc(const struct lu_env *env,
					  struct lu_device_type *t,
					  struct lustre_cfg *cfg)
{
	return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
}

static const struct lu_device_type_operations vvp_device_type_ops = {
	.ldto_init = vvp_type_init,
	.ldto_fini = vvp_type_fini,

	.ldto_start = vvp_type_start,
	.ldto_stop  = vvp_type_stop,

	.ldto_device_alloc = vvp_device_alloc,
	.ldto_device_free  = ccc_device_free,
	.ldto_device_init  = ccc_device_init,
	.ldto_device_fini  = ccc_device_fini
};

struct lu_device_type vvp_device_type = {
	.ldt_tags     = LU_DEVICE_CL,
	.ldt_name     = LUSTRE_VVP_NAME,
	.ldt_ops      = &vvp_device_type_ops,
	.ldt_ctx_tags = LCT_CL_THREAD
};

/**
 * Register the vvp caches and context keys and make the vvp device type
 * known to the cl framework; the cache setup is undone on failure.
 */
int vvp_global_init(void)
{
	int result;

	result = lu_kmem_init(vvp_caches);
	if (result == 0) {
		result = ccc_global_init(&vvp_device_type);
		if (result != 0)
			lu_kmem_fini(vvp_caches);
	}
	return result;
}

void vvp_global_fini(void)
{
	ccc_global_fini(&vvp_device_type);
	lu_kmem_fini(vvp_caches);
}

/*****************************************************************************
 *
 * Mirror obd-devices into cl devices.
 *
 */

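/*
 * Build the cl-stack for a newly mounted super block: set up a vvp device
 * on top of the OBD device behind ll_dt_exp and cache the resulting
 * cl_device and lu_site in the superblock info for later object and page
 * operations.
 */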
int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device  *cl;
	struct lu_env     *env;
	int rc = 0;
	int refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			cl2ccc_dev(cl)->cdv_sb = sb;
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else {
		rc = PTR_ERR(env);
	}
	return rc;
}

int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env     *env;
	struct cl_device  *cld;
	int		   refcheck;
	int		   result;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;

		if (cld != NULL) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	/*
	 * If the mount failed (sbi->ll_cl == NULL) and there are no other
	 * mounts, stop device types manually (this usually happens
	 * automatically when the last device is destroyed).
	 */
	lu_types_stop();
	return result;
}

/****************************************************************************
 *
 * /proc/fs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/
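
/*
 * Reading this file walks the client page cache and prints one line per
 * cached page, for example:
 *
 *	$ cat /proc/fs/lustre/llite/$MNT/dump_page_cache
 *
 * where $MNT names the particular mount under /proc/fs/lustre/llite/.
 */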

/*
 * To represent the contents of the page cache as a byte stream, the
 * following information is encoded in a 64-bit offset:
 *
 *	- file hash bucket in lu_site::ls_hash[]	28 bits
 *
 *	- how far the file is from the bucket head	 4 bits
 *
 *	- page index					32 bits
 *
 * The first two fields identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT (32 + 4)
#define PGC_DEPTH_SHIFT (32)
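
/*
 * For example, pos = 0x0000002300000010 unpacks to
 *
 *	vpi_bucket = 0x2300000010 >> 36		 = 2
 *	vpi_depth  = (0x2300000010 >> 32) & 0xf	 = 3
 *	vpi_index  = 0x2300000010 & 0xffffffff	 = 0x10
 *
 * and vvp_pgcache_id_pack() below reassembles the same 64-bit offset.
 */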

struct vvp_pgcache_id {
	/* position encoded in the 64-bit offset */
	unsigned		 vpi_bucket;
	unsigned		 vpi_depth;
	uint32_t		 vpi_index;

	/* scratch state used while walking a hash chain */
	unsigned		 vpi_curdep;
	struct lu_object_header *vpi_obj;
};

static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
	CLASSERT(sizeof(pos) == sizeof(__u64));

	id->vpi_index  = pos & 0xffffffff;
	id->vpi_depth  = (pos >> PGC_DEPTH_SHIFT) & 0xf;
	id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
}

static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
	return
		((__u64)id->vpi_index) |
		((__u64)id->vpi_depth  << PGC_DEPTH_SHIFT) |
		((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}

static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *data)
{
	struct vvp_pgcache_id   *id  = data;
	struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

	if (id->vpi_curdep-- > 0)
		return 0; /* continue */

	if (lu_object_is_dying(hdr))
		return 1;

	cfs_hash_get(hs, hnode);
	id->vpi_obj = hdr;
	return 1;
}
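
/*
 * Look up the object vpi_depth entries deep in hash bucket vpi_bucket and
 * return its cl-layer slice with a reference held. Returns NULL when the
 * object is dying, or when the chain is shorter than vpi_depth; in the
 * latter case vpi_depth is set to 0xf so the caller moves on to the next
 * bucket.
 */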
static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
					 struct lu_device *dev,
					 struct vvp_pgcache_id *id)
{
	LASSERT(lu_device_is_cl(dev));

	id->vpi_depth &= 0xf;
	id->vpi_obj    = NULL;
	id->vpi_curdep = id->vpi_depth;

	cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
				vvp_pgcache_obj_get, id);
	if (id->vpi_obj != NULL) {
		struct lu_object *lu_obj;

		lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
		if (lu_obj != NULL) {
			lu_object_ref_add(lu_obj, "dump", current);
			return lu2cl(lu_obj);
		}
		lu_object_put(env, lu_object_top(id->vpi_obj));
	} else if (id->vpi_curdep > 0) {
		id->vpi_depth = 0xf;
	}
	return NULL;
}
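
/*
 * Return the packed position of the first cached page at or after @pos:
 * scan pages of the current object by index, then successive objects in
 * the same hash chain (depth), then subsequent buckets. ~0ULL means the
 * whole cache has been traversed.
 */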
static loff_t vvp_pgcache_find(const struct lu_env *env,
			       struct lu_device *dev, loff_t pos)
{
	struct cl_object     *clob;
	struct lu_site       *site;
	struct vvp_pgcache_id id;

	site = dev->ld_site;
	vvp_pgcache_id_unpack(pos, &id);

	while (1) {
		if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
			return ~0ULL;
		clob = vvp_pgcache_obj(env, dev, &id);
		if (clob != NULL) {
			struct cl_object_header *hdr;
			int			 nr;
			struct cl_page		*pg;

			/* got an object. Find next page. */
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			nr = radix_tree_gang_lookup(&hdr->coh_tree,
						    (void **)&pg,
						    id.vpi_index, 1);
			if (nr > 0) {
				id.vpi_index = pg->cp_index;
				/*
				 * Can't encode page indices above 32 bits
				 * (files over 16TB with 4KB pages).
				 */
				nr = !(pg->cp_index > 0xffffffff);
			}
			spin_unlock(&hdr->coh_page_guard);

			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
			if (nr > 0)
				return vvp_pgcache_id_pack(&id);
		}
		/* to the next object. */
		++id.vpi_depth;
		id.vpi_depth &= 0xf;
		if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
			return ~0ULL;
		id.vpi_index = 0;
	}
}

#define seq_page_flag(seq, page, flag, has_flags) do {			\
	if (test_bit(PG_##flag, &(page)->flags)) {			\
		seq_printf(seq, "%s"#flag, has_flags ? "|" : "");	\
		has_flags = 1;						\
	}								\
} while (0)
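
/*
 * With the macro above, a locked, up-to-date, dirty page prints as
 * "locked|uptodate|dirty"; has_flags lets the caller emit "-" when no
 * flag is set at all.
 */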

static void vvp_pgcache_page_show(const struct lu_env *env,
				  struct seq_file *seq, struct cl_page *page)
{
	struct ccc_page	*cpg;
	struct page	*vmpage;
	int		 has_flags;

	cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
	vmpage = cpg->cpg_page;
	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
		   0 /* gen */,
		   cpg, page,
		   "none",
		   cpg->cpg_write_queued ? "wq" : "- ",
		   cpg->cpg_defer_uptodate ? "du" : "- ",
		   PageWriteback(vmpage) ? "wb" : "-",
		   vmpage, vmpage->mapping->host->i_ino,
		   vmpage->mapping->host->i_generation,
		   vmpage->mapping->host, vmpage->index,
		   page_count(vmpage));
	has_flags = 0;
	seq_page_flag(seq, vmpage, locked, has_flags);
	seq_page_flag(seq, vmpage, error, has_flags);
	seq_page_flag(seq, vmpage, referenced, has_flags);
	seq_page_flag(seq, vmpage, uptodate, has_flags);
	seq_page_flag(seq, vmpage, dirty, has_flags);
	seq_page_flag(seq, vmpage, writeback, has_flags);
	seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}
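
/*
 * A line produced by vvp_pgcache_page_show(), preceded by the index@FID
 * prefix printed in vvp_pgcache_show(), might look like (pointer and
 * inode values illustrative only):
 *
 *	0 | ffff88003d0a1d80 ffff88003c8f0e00 none -  du - |
 *	  ffffea0000f5e940 12345/2(ffff88003d2a8128) 17 2 [uptodate|dirty]
 */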

static int vvp_pgcache_show(struct seq_file *f, void *v)
{
	loff_t			 pos;
	struct ll_sb_info	*sbi;
	struct cl_object	*clob;
	struct lu_env		*env;
	struct cl_page		*page;
	struct cl_object_header *hdr;
	struct vvp_pgcache_id	 id;
	int			 refcheck;
	int			 result;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		pos = *(loff_t *)v;
		vvp_pgcache_id_unpack(pos, &id);
		sbi = f->private;
		clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
		if (clob != NULL) {
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			page = cl_page_lookup(hdr, id.vpi_index);
			spin_unlock(&hdr->coh_page_guard);

			seq_printf(f, "%8x@"DFID": ",
				   id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
			if (page != NULL) {
				vvp_pgcache_page_show(env, f, page);
				cl_page_put(env, page);
			} else {
				seq_puts(f, "missing\n");
			}
			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
		} else {
			seq_printf(f, "%llx missing\n", pos);
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		result = PTR_ERR(env);
	}
	return result;
}

static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env	  *env;
	int		   refcheck;

	sbi = f->private;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		if (sbi->ll_site->ls_obj_hash->hs_cur_bits >
		    64 - PGC_OBJ_SHIFT) {
			pos = ERR_PTR(-EFBIG);
		} else {
			*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
						*pos);
			if (*pos == ~0ULL)
				pos = NULL;
		}
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env	  *env;
	int		   refcheck;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		sbi = f->private;
		*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
		if (*pos == ~0ULL)
			pos = NULL;
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations vvp_pgcache_ops = {
	.start = vvp_pgcache_start,
	.next  = vvp_pgcache_next,
	.stop  = vvp_pgcache_stop,
	.show  = vvp_pgcache_show
};

static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
	struct ll_sb_info *sbi = PDE_DATA(inode);
	struct seq_file   *seq;
	int		   result;

	result = seq_open(filp, &vvp_pgcache_ops);
	if (result == 0) {
		seq = filp->private_data;
		seq->private = sbi;
	}
	return result;
}

const struct file_operations vvp_dump_pgcache_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = vvp_dump_pgcache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};