/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_resource.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Peter Braam <braam@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/obd_class.h"
#include "ldlm_internal.h"

struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;

int ldlm_srv_namespace_nr = 0;
int ldlm_cli_namespace_nr = 0;

struct mutex ldlm_srv_namespace_lock;
LIST_HEAD(ldlm_srv_namespace_list);

struct mutex ldlm_cli_namespace_lock;
/* Client namespaces that have active resources in them.
 * Once all resources go away, ldlm_poold moves such namespaces to the
 * inactive list. */
LIST_HEAD(ldlm_cli_active_namespace_list);
/* Client namespaces that don't have any locks in them. */
LIST_HEAD(ldlm_cli_inactive_namespace_list);

struct proc_dir_entry *ldlm_type_proc_dir = NULL;
struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
struct proc_dir_entry *ldlm_svc_proc_dir = NULL;

extern unsigned int ldlm_cancel_unused_locks_before_replay;

/* When dumping a resource for debugging, print at most this many granted
 * locks so that a single resource cannot flood the logs (DoS-style). */
unsigned int ldlm_dump_granted_max = 256;

#if defined(CONFIG_PROC_FS)
static ssize_t lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
				  size_t count, loff_t *off)
{
	ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
	ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
	return count;
}
LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);

LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);

int ldlm_proc_setup(void)
{
	int rc;
	struct lprocfs_vars list[] = {
		{ "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
		{ "dump_granted_max", &ldlm_rw_uint_fops,
		  &ldlm_dump_granted_max },
		{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
		  &ldlm_cancel_unused_locks_before_replay },
		{ NULL } };

	LASSERT(ldlm_ns_proc_dir == NULL);

	ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
					      proc_lustre_root,
					      NULL, NULL);
	if (IS_ERR(ldlm_type_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_type_proc_dir);
		goto err;
	}

	ldlm_ns_proc_dir = lprocfs_register("namespaces",
					    ldlm_type_proc_dir,
					    NULL, NULL);
	if (IS_ERR(ldlm_ns_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_ns_proc_dir);
		goto err_type;
	}

	ldlm_svc_proc_dir = lprocfs_register("services",
					     ldlm_type_proc_dir,
					     NULL, NULL);
	if (IS_ERR(ldlm_svc_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_svc_proc_dir);
		goto err_ns;
	}

	rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);

	return 0;

err_ns:
	lprocfs_remove(&ldlm_ns_proc_dir);
err_type:
	lprocfs_remove(&ldlm_type_proc_dir);
err:
	ldlm_svc_proc_dir = NULL;
	ldlm_type_proc_dir = NULL;
	ldlm_ns_proc_dir = NULL;
	return rc;
}

void ldlm_proc_cleanup(void)
{
	if (ldlm_svc_proc_dir)
		lprocfs_remove(&ldlm_svc_proc_dir);

	if (ldlm_ns_proc_dir)
		lprocfs_remove(&ldlm_ns_proc_dir);

	if (ldlm_type_proc_dir)
		lprocfs_remove(&ldlm_type_proc_dir);

	ldlm_svc_proc_dir = NULL;
	ldlm_type_proc_dir = NULL;
	ldlm_ns_proc_dir = NULL;
}

static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u64 res = 0;
	struct cfs_hash_bd bd;
	int i;

	/* result is not strictly consistent */
	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
		res += cfs_hash_bd_count_get(&bd);
	return lprocfs_rd_u64(m, &res);
}
LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);

static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u64 locks;

	locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
					LPROCFS_FIELDS_FLAGS_SUM);
	return lprocfs_rd_u64(m, &locks);
}
LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);

static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u32 *nr = &ns->ns_max_unused;

	if (ns_connect_lru_resize(ns))
		nr = &ns->ns_nr_unused;
	return lprocfs_rd_uint(m, nr);
}
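
/*
 * Writing to the per-namespace "lru_size" file drives the handler below.
 * In summary (derived from the code itself, not from separate docs):
 *   - "clear" cancels all unused locks in this namespace;
 *   - "0" re-enables dynamic LRU resizing, provided the server originally
 *     advertised OBD_CONNECT_LRU_RESIZE;
 *   - any other value disables LRU resizing and becomes the new
 *     ns_max_unused limit.
 */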

static ssize_t lprocfs_lru_size_seq_write(struct file *file,
					  const char __user *buffer,
					  size_t count, loff_t *off)
{
	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
	char dummy[MAX_STRING_SIZE + 1];
	unsigned long tmp;
	int lru_resize;
	int err;

	dummy[MAX_STRING_SIZE] = '\0';
	if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
		return -EFAULT;

	if (strncmp(dummy, "clear", 5) == 0) {
		CDEBUG(D_DLMTRACE,
		       "dropping all unused locks from namespace %s\n",
		       ldlm_ns_name(ns));
		if (ns_connect_lru_resize(ns)) {
			int canceled, unused = ns->ns_nr_unused;

			/* Try to cancel all @ns_nr_unused locks. */
			canceled = ldlm_cancel_lru(ns, unused, 0,
						   LDLM_CANCEL_PASSED);
			if (canceled < unused) {
				CDEBUG(D_DLMTRACE,
				       "not all requested locks are canceled, requested: %d, canceled: %d\n",
				       unused, canceled);
				return -EINVAL;
			}
		} else {
			tmp = ns->ns_max_unused;
			ns->ns_max_unused = 0;
			ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
			ns->ns_max_unused = tmp;
		}
		return count;
	}

	err = kstrtoul(dummy, 10, &tmp);
	if (err != 0) {
		CERROR("invalid value written\n");
		return -EINVAL;
	}
	lru_resize = (tmp == 0);

	if (ns_connect_lru_resize(ns)) {
		if (!lru_resize)
			ns->ns_max_unused = (unsigned int)tmp;

		if (tmp > ns->ns_nr_unused)
			tmp = ns->ns_nr_unused;
		tmp = ns->ns_nr_unused - tmp;

		CDEBUG(D_DLMTRACE,
		       "changing namespace %s unused locks from %u to %u\n",
		       ldlm_ns_name(ns), ns->ns_nr_unused,
		       (unsigned int)tmp);
		ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);

		if (!lru_resize) {
			CDEBUG(D_DLMTRACE,
			       "disable lru_resize for namespace %s\n",
			       ldlm_ns_name(ns));
			ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
		}
	} else {
		CDEBUG(D_DLMTRACE,
		       "changing namespace %s max_unused from %u to %u\n",
		       ldlm_ns_name(ns), ns->ns_max_unused,
		       (unsigned int)tmp);
		ns->ns_max_unused = (unsigned int)tmp;
		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);

		/* Make sure that LRU resize was originally supported before
		 * turning it on here. */
		if (lru_resize &&
		    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
			CDEBUG(D_DLMTRACE,
			       "enable lru_resize for namespace %s\n",
			       ldlm_ns_name(ns));
			ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
		}
	}

	return count;
}
LPROC_SEQ_FOPS(lprocfs_lru_size);
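
/*
 * "early_lock_cancel" (ELC) control: reading reports whether this namespace
 * piggybacks lock cancels on other RPCs (OBD_CONNECT_CANCELSET); writing 0
 * turns the feature off, and a nonzero value turns it back on only if the
 * server originally advertised support.
 */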

static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	unsigned int supp = ns_connect_cancelset(ns);

	return lprocfs_rd_uint(m, &supp);
}

static ssize_t lprocfs_elc_seq_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *off)
{
	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
	unsigned int supp = -1;
	int rc;

	rc = lprocfs_wr_uint(file, buffer, count, &supp);
	if (rc < 0)
		return rc;

	if (supp == 0)
		ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
	else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
		ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
	return count;
}
LPROC_SEQ_FOPS(lprocfs_elc);

void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
{
	if (ns->ns_proc_dir_entry == NULL)
		CERROR("dlm namespace %s has no procfs dir?\n",
		       ldlm_ns_name(ns));
	else
		lprocfs_remove(&ns->ns_proc_dir_entry);

	if (ns->ns_stats != NULL)
		lprocfs_free_stats(&ns->ns_stats);
}

#define LDLM_NS_ADD_VAR(name, var, ops)				\
	do {							\
		snprintf(lock_name, MAX_STRING_SIZE, name);	\
		lock_vars[0].data = var;			\
		lock_vars[0].fops = ops;			\
		lprocfs_add_vars(ns_pde, lock_vars, NULL);	\
	} while (0)

int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
{
	struct lprocfs_vars lock_vars[2];
	char lock_name[MAX_STRING_SIZE + 1];
	struct proc_dir_entry *ns_pde;

	LASSERT(ns != NULL);
	LASSERT(ns->ns_rs_hash != NULL);

	if (ns->ns_proc_dir_entry != NULL) {
		ns_pde = ns->ns_proc_dir_entry;
	} else {
		ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
		if (ns_pde == NULL)
			return -ENOMEM;
		ns->ns_proc_dir_entry = ns_pde;
	}

	ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
	if (ns->ns_stats == NULL)
		return -ENOMEM;

	lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
			     LPROCFS_CNTR_AVGMINMAX, "locks", "locks");

	lock_name[MAX_STRING_SIZE] = '\0';

	memset(lock_vars, 0, sizeof(lock_vars));
	lock_vars[0].name = lock_name;

	LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops);
	LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops);

	if (ns_is_client(ns)) {
		LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused,
				&ldlm_uint_fops);
		LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops);
		LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops);
	} else {
		LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts,
				&ldlm_uint_fops);
		LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast,
				&ldlm_rw_uint_fops);
	}
	return 0;
}
#undef MAX_STRING_SIZE
#else /* !CONFIG_PROC_FS */

#define ldlm_namespace_proc_unregister(ns)	({ ; })
#define ldlm_namespace_proc_register(ns)	({ 0; })

#endif /* CONFIG_PROC_FS */
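
/*
 * Resource hash helpers.  Two flavours are used below: a simple additive
 * hash over the resource name words (ldlm_res_hop_hash), and a FID-based
 * hash (ldlm_res_hop_fid_hash) that spreads resources better for metadata
 * namespaces where the resource name encodes a FID.
 */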

static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
				  const void *key, unsigned mask)
{
	const struct ldlm_res_id *id = key;
	unsigned val = 0;
	unsigned i;

	for (i = 0; i < RES_NAME_SIZE; i++)
		val += id->name[i];
	return val & mask;
}

static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
				      const void *key, unsigned mask)
{
	const struct ldlm_res_id *id = key;
	struct lu_fid fid;
	__u32 hash;
	__u32 val;

	fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
	fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
	fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);

	hash = fid_flatten32(&fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
		val = id->name[LUSTRE_RES_ID_HSH_OFF];
		hash += (val >> 5) + (val << 11);
	} else {
		val = fid_oid(&fid);
	}
	hash = hash_long(hash, hs->hs_bkt_bits);
	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, val % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}

static void *ldlm_res_hop_key(struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	return &res->lr_name;
}

static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	return ldlm_res_eq((const struct ldlm_res_id *)key,
			   (const struct ldlm_res_id *)&res->lr_name);
}

static void *ldlm_res_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}

static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
				    struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	ldlm_resource_getref(res);
}

static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
				    struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	/* cfs_hash_for_each_nolock() is the only caller of this hook. */
	ldlm_resource_putref_locked(res);
}

static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	ldlm_resource_putref(res);
}

cfs_hash_ops_t ldlm_ns_hash_ops = {
	.hs_hash	= ldlm_res_hop_hash,
	.hs_key		= ldlm_res_hop_key,
	.hs_keycmp	= ldlm_res_hop_keycmp,
	.hs_keycpy	= NULL,
	.hs_object	= ldlm_res_hop_object,
	.hs_get		= ldlm_res_hop_get_locked,
	.hs_put_locked	= ldlm_res_hop_put_locked,
	.hs_put		= ldlm_res_hop_put
};

cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
	.hs_hash	= ldlm_res_hop_fid_hash,
	.hs_key		= ldlm_res_hop_key,
	.hs_keycmp	= ldlm_res_hop_keycmp,
	.hs_keycpy	= NULL,
	.hs_object	= ldlm_res_hop_object,
	.hs_get		= ldlm_res_hop_get_locked,
	.hs_put_locked	= ldlm_res_hop_put_locked,
	.hs_put		= ldlm_res_hop_put
};
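
/*
 * Per-namespace-type hash geometry: nsd_all_bits gives the total hash size
 * (2^nsd_all_bits slots) and nsd_bkt_bits controls how those slots are
 * grouped under per-bucket locks, so the busier namespace types (MDT, OST)
 * get much larger tables than MGC/MGT.
 */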

typedef struct {
	ldlm_ns_type_t	nsd_type;
	/** hash bucket bits */
	unsigned	nsd_bkt_bits;
	/** hash bits */
	unsigned	nsd_all_bits;
	/** hash operations */
	cfs_hash_ops_t *nsd_hops;
} ldlm_ns_hash_def_t;

ldlm_ns_hash_def_t ldlm_ns_hash_defs[] = {
	{
		.nsd_type	= LDLM_NS_TYPE_MDC,
		.nsd_bkt_bits	= 11,
		.nsd_all_bits	= 16,
		.nsd_hops	= &ldlm_ns_fid_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_MDT,
		.nsd_bkt_bits	= 14,
		.nsd_all_bits	= 21,
		.nsd_hops	= &ldlm_ns_fid_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_OSC,
		.nsd_bkt_bits	= 8,
		.nsd_all_bits	= 12,
		.nsd_hops	= &ldlm_ns_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_OST,
		.nsd_bkt_bits	= 11,
		.nsd_all_bits	= 17,
		.nsd_hops	= &ldlm_ns_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_MGC,
		.nsd_bkt_bits	= 4,
		.nsd_all_bits	= 4,
		.nsd_hops	= &ldlm_ns_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_MGT,
		.nsd_bkt_bits	= 4,
		.nsd_all_bits	= 4,
		.nsd_hops	= &ldlm_ns_hash_ops,
	},
	{
		.nsd_type	= LDLM_NS_TYPE_UNKNOWN,
	},
};

/**
 * Create and initialize new empty namespace.
 */
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
					  ldlm_side_t client,
					  ldlm_appetite_t apt,
					  ldlm_ns_type_t ns_type)
{
	struct ldlm_namespace *ns = NULL;
	struct ldlm_ns_bucket *nsb;
	ldlm_ns_hash_def_t *nsd;
	struct cfs_hash_bd bd;
	int idx;
	int rc;

	LASSERT(obd != NULL);

	rc = ldlm_get_ref();
	if (rc) {
		CERROR("ldlm_get_ref failed: %d\n", rc);
		return NULL;
	}

	for (idx = 0; ; idx++) {
		nsd = &ldlm_ns_hash_defs[idx];
		if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
			CERROR("Unknown type %d for ns %s\n", ns_type, name);
			goto out_ref;
		}

		if (nsd->nsd_type == ns_type)
			break;
	}

	OBD_ALLOC_PTR(ns);
	if (!ns)
		goto out_ref;

	ns->ns_rs_hash = cfs_hash_create(name,
					 nsd->nsd_all_bits, nsd->nsd_all_bits,
					 nsd->nsd_bkt_bits, sizeof(*nsb),
					 CFS_HASH_MIN_THETA,
					 CFS_HASH_MAX_THETA,
					 nsd->nsd_hops,
					 CFS_HASH_DEPTH |
					 CFS_HASH_BIGNAME |
					 CFS_HASH_SPIN_BKTLOCK |
					 CFS_HASH_NO_ITEMREF);
	if (ns->ns_rs_hash == NULL)
		goto out_ns;

	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
		nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
		at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
		nsb->nsb_namespace = ns;
	}

	ns->ns_obd = obd;
	ns->ns_appetite = apt;
	ns->ns_client = client;

	INIT_LIST_HEAD(&ns->ns_list_chain);
	INIT_LIST_HEAD(&ns->ns_unused_list);
	spin_lock_init(&ns->ns_lock);
	atomic_set(&ns->ns_bref, 0);
	init_waitqueue_head(&ns->ns_waitq);

	ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
	ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
	ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;

	ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
	ns->ns_nr_unused = 0;
	ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
	ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
	ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
	ns->ns_timeouts = 0;
	ns->ns_orig_connect_flags = 0;
	ns->ns_connect_flags = 0;
	ns->ns_stopping = 0;

	rc = ldlm_namespace_proc_register(ns);
	if (rc != 0) {
		CERROR("Can't initialize ns proc, rc %d\n", rc);
		goto out_hash;
	}

	idx = ldlm_namespace_nr_read(client);
	rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
	if (rc) {
		CERROR("Can't initialize lock pool, rc %d\n", rc);
		goto out_proc;
	}

	ldlm_namespace_register(ns, client);
	return ns;
out_proc:
	ldlm_namespace_proc_unregister(ns);
	ldlm_namespace_cleanup(ns, 0);
out_hash:
	cfs_hash_putref(ns->ns_rs_hash);
out_ns:
	OBD_FREE_PTR(ns);
out_ref:
	ldlm_put_ref();
	return NULL;
}
EXPORT_SYMBOL(ldlm_namespace_new);
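
/*
 * Example (illustrative only, not taken from a caller in this file): a
 * client-side obd would typically create its namespace along the lines of
 *
 *	ns = ldlm_namespace_new(obd, "some-client", LDLM_NAMESPACE_CLIENT,
 *				LDLM_NAMESPACE_MODEST, LDLM_NS_TYPE_OSC);
 *	if (ns == NULL)
 *		rc = -ENOMEM;
 *
 * and tear it down again with ldlm_namespace_free().
 */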

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/**
 * Cancel and destroy all locks on a resource.
 *
 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
 * clean up. This is currently only used for recovery, and we make
 * certain assumptions as a result--notably, that we shouldn't cancel
 * locks with refs.
 */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
			     __u64 flags)
{
	struct list_head *tmp;
	int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
	bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);

	do {
		struct ldlm_lock *lock = NULL;

		/* First, look for a lock that has not been cleaned yet;
		 * all cleaned locks are marked with the CLEANED flag. */
		lock_res(res);
		list_for_each(tmp, q) {
			lock = list_entry(tmp, struct ldlm_lock,
					  l_res_link);
			if (lock->l_flags & LDLM_FL_CLEANED) {
				lock = NULL;
				continue;
			}
			LDLM_LOCK_GET(lock);
			lock->l_flags |= LDLM_FL_CLEANED;
			break;
		}

		if (lock == NULL) {
			unlock_res(res);
			break;
		}

		/* Set CBPENDING so nothing in the cancellation path
		 * can match this lock. */
		lock->l_flags |= LDLM_FL_CBPENDING;
		lock->l_flags |= LDLM_FL_FAILED;
		lock->l_flags |= flags;

		/* ... without sending a CANCEL message for local_only. */
		if (local_only)
			lock->l_flags |= LDLM_FL_LOCAL_ONLY;

		if (local_only && (lock->l_readers || lock->l_writers)) {
			/* This is a little bit gross, but much better than the
			 * alternative: pretend that we got a blocking AST from
			 * the server, so that when the lock is decref'd, it
			 * will go away ... */
			unlock_res(res);
			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
			if (lock->l_completion_ast)
				lock->l_completion_ast(lock, 0, NULL);
			LDLM_LOCK_RELEASE(lock);
			continue;
		}

		if (client) {
			struct lustre_handle lockh;

			unlock_res(res);
			ldlm_lock2handle(lock, &lockh);
			rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
			if (rc)
				CERROR("ldlm_cli_cancel: %d\n", rc);
		} else {
			ldlm_resource_unlink_lock(lock);
			unlock_res(res);
			LDLM_DEBUG(lock,
				   "Freeing a lock still held by a client node");
			ldlm_lock_destroy(lock);
		}
		LDLM_LOCK_RELEASE(lock);
	} while (1);
}

static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	__u64 flags = *(__u64 *)arg;

	cleanup_resource(res, &res->lr_granted, flags);
	cleanup_resource(res, &res->lr_converting, flags);
	cleanup_resource(res, &res->lr_waiting, flags);

	return 0;
}

static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				  struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);

	lock_res(res);
	CERROR("%s: namespace resource "DLDLMRES
	       " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
	       ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
	       atomic_read(&res->lr_refcount) - 1);

	ldlm_resource_dump(D_ERROR, res);
	unlock_res(res);
	return 0;
}

/**
 * Cancel and destroy all locks in the namespace.
 *
 * Typically used during evictions when server notified client that it was
 * evicted and all of its state needs to be destroyed.
 * Also used during shutdown.
 */
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
{
	if (ns == NULL) {
		CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
		return ELDLM_OK;
	}

	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
	return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_namespace_cleanup);
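
/*
 * Note: passing LDLM_FL_LOCAL_ONLY in @flags above makes cleanup_resource()
 * skip sending CANCEL RPCs; the forced-free path below passes that flag for
 * exactly this reason.
 */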

/**
 * Attempts to free namespace.
 *
 * Only used when namespace goes away, like during an unmount.
 */
static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
	/* At shutdown time, don't call the cancellation callback */
	ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);

	if (atomic_read(&ns->ns_bref) > 0) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
		int rc;

		CDEBUG(D_DLMTRACE,
		       "dlm namespace %s free waiting on refcount %d\n",
		       ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
		if (force)
			lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);

		rc = l_wait_event(ns->ns_waitq,
				  atomic_read(&ns->ns_bref) == 0, &lwi);

		/* Forced cleanups should be able to reclaim all references,
		 * so it's safe to wait forever... we can't leak locks... */
		if (force && rc == -ETIMEDOUT) {
			LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
				       ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			goto force_wait;
		}

		if (atomic_read(&ns->ns_bref)) {
			LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
				       ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			return ELDLM_NAMESPACE_EXISTS;
		}
		CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
		       ldlm_ns_name(ns));
	}

	return ELDLM_OK;
}

/**
 * Performs various cleanups for passed \a ns to make it drop refc and be
 * ready for freeing. Waits for refc == 0.
 *
 * The following is done:
 * (0) Unregister \a ns from its list to make inaccessible for potential
 *     users like pools thread and others;
 * (1) Clear all locks in \a ns.
 */
void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
			       struct obd_import *imp,
			       int force)
{
	int rc;

	if (!ns)
		return;

	spin_lock(&ns->ns_lock);
	ns->ns_stopping = 1;
	spin_unlock(&ns->ns_lock);

	/*
	 * Can fail with -EINTR when force == 0 in which case try harder.
	 */
	rc = __ldlm_namespace_free(ns, force);
	if (rc != ELDLM_OK) {
		if (imp) {
			ptlrpc_disconnect_import(imp, 0);
			ptlrpc_invalidate_import(imp);
		}

		/*
		 * With all requests dropped and the import inactive
		 * we are guaranteed all references will be dropped.
		 */
		rc = __ldlm_namespace_free(ns, 1);
		LASSERT(rc == 0);
	}
}

/**
 * Performs freeing memory structures related to \a ns. This is only done
 * when ldlm_namespace_free_prior() successfully removed all resources
 * referencing \a ns and its refc == 0.
 */
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
{
	if (!ns)
		return;

	/* Make sure that nobody can find this ns in its list. */
	ldlm_namespace_unregister(ns, ns->ns_client);
	/* Fini pool _before_ parent proc dir is removed. This is important as
	 * ldlm_pool_fini() removes own proc dir which is child to @dir.
	 * Removing it after @dir may cause oops. */
	ldlm_pool_fini(&ns->ns_pool);

	ldlm_namespace_proc_unregister(ns);
	cfs_hash_putref(ns->ns_rs_hash);
	/* Namespace \a ns should be not on list at this time, otherwise
	 * this will cause issues related to using freed \a ns in poold
	 * thread. */
	LASSERT(list_empty(&ns->ns_list_chain));
	OBD_FREE_PTR(ns);
	ldlm_put_ref();
}

/**
 * Cleanup the resource, and free namespace.
 * bug 12864:
 * Deadlock issue:
 * proc1: destroy import
 *        class_disconnect_export(grab cl_sem) ->
 *              -> ldlm_namespace_free ->
 *              -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *        lprocfs_fops_read(grab _lprocfs_lock) ->
 *              -> osc_rd_active, etc(grab cl_sem).
 *
 * Because of this, ldlm_namespace_free() is split into two parts: the first,
 * ldlm_namespace_free_prior(), cleans up the resources that are still being
 * used; the second, ldlm_namespace_free_post(), unregisters the lprocfs
 * entries and then frees the memory. It is called without cli->cl_sem held.
 */
void ldlm_namespace_free(struct ldlm_namespace *ns,
			 struct obd_import *imp,
			 int force)
{
	ldlm_namespace_free_prior(ns, imp, force);
	ldlm_namespace_free_post(ns);
}
EXPORT_SYMBOL(ldlm_namespace_free);

void ldlm_namespace_get(struct ldlm_namespace *ns)
{
	atomic_inc(&ns->ns_bref);
}
EXPORT_SYMBOL(ldlm_namespace_get);

/* This is only for callers that care about refcount */
int ldlm_namespace_get_return(struct ldlm_namespace *ns)
{
	return atomic_inc_return(&ns->ns_bref);
}

void ldlm_namespace_put(struct ldlm_namespace *ns)
{
	if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
		wake_up(&ns->ns_waitq);
		spin_unlock(&ns->ns_lock);
	}
}
EXPORT_SYMBOL(ldlm_namespace_put);

/** Register \a ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(list_empty(&ns->ns_list_chain));
	list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
	ldlm_namespace_nr_inc(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Unregister \a ns from the list of namespaces. */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(!list_empty(&ns->ns_list_chain));
	/* Some asserts and possibly other parts of the code are still
	 * using list_empty(&ns->ns_list_chain). This is why it is
	 * important to use list_del_init() here. */
	list_del_init(&ns->ns_list_chain);
	ldlm_namespace_nr_dec(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
					  ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
					    ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain,
		       ldlm_namespace_inactive_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	LASSERT(!list_empty(ldlm_namespace_list(client)));
	return container_of(ldlm_namespace_list(client)->next,
			    struct ldlm_namespace, ns_list_chain);
}
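
/*
 * Resource creation protocol: ldlm_resource_new() hands back the resource
 * with lr_lvb_mutex already held.  Whoever inserts it into the namespace
 * hash is expected to finish LVB initialization and then drop the mutex;
 * concurrent lookups in ldlm_resource_get() briefly take and release the
 * same mutex so they only ever see fully initialized resources.
 */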

/** Create and initialize new resource. */
static struct ldlm_resource *ldlm_resource_new(void)
{
	struct ldlm_resource *res;
	int idx;

	OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
	if (res == NULL)
		return NULL;

	INIT_LIST_HEAD(&res->lr_granted);
	INIT_LIST_HEAD(&res->lr_converting);
	INIT_LIST_HEAD(&res->lr_waiting);

	/* Initialize interval trees for each lock mode. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		res->lr_itree[idx].lit_size = 0;
		res->lr_itree[idx].lit_mode = 1 << idx;
		res->lr_itree[idx].lit_root = NULL;
	}

	atomic_set(&res->lr_refcount, 1);
	spin_lock_init(&res->lr_lock);
	lu_ref_init(&res->lr_reference);

	/* The creator of the resource must unlock the mutex after LVB
	 * initialization. */
	mutex_init(&res->lr_lvb_mutex);
	mutex_lock(&res->lr_lvb_mutex);

	return res;
}

/**
 * Return a reference to resource with given name, creating it if necessary.
 * Args: namespace with ns_lock unlocked
 * Locks: takes and releases NS hash-lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL
 */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
		  const struct ldlm_res_id *name, ldlm_type_t type, int create)
{
	struct hlist_node *hnode;
	struct ldlm_resource *res;
	struct cfs_hash_bd bd;
	__u64 version;
	int ns_refcount = 0;

	LASSERT(ns != NULL);
	LASSERT(parent == NULL);
	LASSERT(ns->ns_rs_hash != NULL);
	LASSERT(name->name[0] != 0);

	cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
	hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
	if (hnode != NULL) {
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}

	version = cfs_hash_bd_version_get(&bd);
	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);

	if (create == 0)
		return NULL;

	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
		 "type: %d\n", type);
	res = ldlm_resource_new();
	if (!res)
		return NULL;

	res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
	res->lr_name = *name;
	res->lr_type = type;
	res->lr_most_restr = LCK_NL;

	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);

	if (hnode != NULL) {
		/* Someone won the race and already added the resource. */
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* Clean lu_ref for failed resource. */
		lu_ref_fini(&res->lr_reference);
		/* We have taken lr_lvb_mutex. Drop it. */
		mutex_unlock(&res->lr_lvb_mutex);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));

		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}
	/* We won! Let's add the resource. */
	cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
	if (cfs_hash_bd_count_get(&bd) == 1)
		ns_refcount = ldlm_namespace_get_return(ns);

	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
		int rc;

		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
		rc = ns->ns_lvbo->lvbo_init(res);
		if (rc < 0) {
			CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
			       ns->ns_obd->obd_name, name->name[0],
			       name->name[1], rc);
			if (res->lr_lvb_data) {
				OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
				res->lr_lvb_data = NULL;
			}
			res->lr_lvb_len = rc;
			mutex_unlock(&res->lr_lvb_mutex);
			ldlm_resource_putref(res);
			return NULL;
		}
	}

	/* We create resource with locked lr_lvb_mutex. */
	mutex_unlock(&res->lr_lvb_mutex);

	/* Let's see if we happened to be the very first resource in this
	 * namespace. If so, and this is a client namespace, we need to move
	 * the namespace into the active namespaces list to be patrolled by
	 * the ldlm_poold. */
	if (ns_is_client(ns) && ns_refcount == 1) {
		mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
		ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
		mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
	}

	return res;
}
EXPORT_SYMBOL(ldlm_resource_get);
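
/*
 * Example (illustrative only): a typical lookup/release pair from a caller
 * elsewhere in ldlm would look roughly like
 *
 *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *	if (res == NULL)
 *		return -ENOMEM;
 *	...
 *	ldlm_resource_putref(res);
 *
 * The parent argument must be NULL (see the LASSERT above), and create=1
 * creates the resource if it does not exist yet.
 */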

struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
	LASSERT(res != NULL);
	LASSERT(res != LP_POISON);
	atomic_inc(&res->lr_refcount);
	CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
	       atomic_read(&res->lr_refcount));
	return res;
}

static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
					 struct ldlm_resource *res)
{
	struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;

	if (!list_empty(&res->lr_granted)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_converting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_waiting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
			       bd, &res->lr_hash);
	lu_ref_fini(&res->lr_reference);
	if (cfs_hash_bd_count_get(bd) == 0)
		ldlm_namespace_put(nsb->nsb_namespace);
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
	struct cfs_hash_bd bd;

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
	if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(ldlm_resource_putref);

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	if (atomic_dec_and_test(&res->lr_refcount)) {
		struct cfs_hash_bd bd;

		cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
				&res->lr_name, &bd);
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
		 * so we should never be here while calling cfs_hash_del,
		 * cfs_hash_for_each_nolock is the only case we can get
		 * here, which is safe to release cfs_hash_bd_lock.
		 */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));

		cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
		return 1;
	}
	return 0;
}

/**
 * Add a lock to the specified lock list of a given resource.
 */
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
			    struct ldlm_lock *lock)
{
	check_res_locked(res);

	LDLM_DEBUG(lock, "About to add this lock:\n");

	if (lock->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		return;
	}

	LASSERT(list_empty(&lock->l_res_link));

	list_add_tail(&lock->l_res_link, head);
}

/**
 * Insert a lock into resource after specified lock.
 *
 * Obtain resource description from the lock we are inserting after.
 */
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
				     struct ldlm_lock *new)
{
	struct ldlm_resource *res = original->l_resource;

	check_res_locked(res);

	ldlm_resource_dump(D_INFO, res);
	LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);

	if (new->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		goto out;
	}

	LASSERT(list_empty(&new->l_res_link));

	list_add(&new->l_res_link, &original->l_res_link);
 out:;
}

void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
	int type = lock->l_resource->lr_type;

	check_res_locked(lock->l_resource);
	if (type == LDLM_IBITS || type == LDLM_PLAIN)
		ldlm_unlink_lock_skiplist(lock);
	else if (type == LDLM_EXTENT)
		ldlm_extent_unlink_lock(lock);
	list_del_init(&lock->l_res_link);
}
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
	desc->lr_type = res->lr_type;
	desc->lr_name = res->lr_name;
}

/**
 * Print information about all locks in all namespaces on this node to debug
 * log.
 */
void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
	struct list_head *tmp;

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	mutex_lock(ldlm_namespace_lock(client));

	list_for_each(tmp, ldlm_namespace_list(client)) {
		struct ldlm_namespace *ns;

		ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
		ldlm_namespace_dump(level, ns);
	}

	mutex_unlock(ldlm_namespace_lock(client));
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);

static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			      struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	int level = (int)(unsigned long)arg;

	lock_res(res);
	ldlm_resource_dump(level, res);
	unlock_res(res);

	return 0;
}

/**
 * Print information about all locks in this namespace on this node to debug
 * log.
 */
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
{
	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
	       ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
	       ns_is_client(ns) ? "client" : "server");

	if (time_before(cfs_time_current(), ns->ns_next_dump))
		return;

	cfs_hash_for_each_nolock(ns->ns_rs_hash,
				 ldlm_res_hash_dump,
				 (void *)(unsigned long)level);
	spin_lock(&ns->ns_lock);
	ns->ns_next_dump = cfs_time_shift(10);
	spin_unlock(&ns->ns_lock);
}
EXPORT_SYMBOL(ldlm_namespace_dump);

/**
 * Print information about all locks in this resource to debug log.
 */
void ldlm_resource_dump(int level, struct ldlm_resource *res)
{
	struct ldlm_lock *lock;
	unsigned int granted = 0;

	CLASSERT(RES_NAME_SIZE == 4);

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
	       PLDLMRES(res), res, atomic_read(&res->lr_refcount));

	if (!list_empty(&res->lr_granted)) {
		CDEBUG(level, "Granted locks (in reverse order):\n");
		list_for_each_entry_reverse(lock, &res->lr_granted,
					    l_res_link) {
			LDLM_DEBUG_LIMIT(level, lock, "###");
			if (!(level & D_CANTMASK) &&
			    ++granted > ldlm_dump_granted_max) {
				CDEBUG(level,
				       "only dump %d granted locks to avoid DDOS.\n",
				       granted);
				break;
			}
		}
	}
	if (!list_empty(&res->lr_converting)) {
		CDEBUG(level, "Converting locks:\n");
		list_for_each_entry(lock, &res->lr_converting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
	if (!list_empty(&res->lr_waiting)) {
		CDEBUG(level, "Waiting locks:\n");
		list_for_each_entry(lock, &res->lr_waiting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
}