dm-cache-policy-mq.c revision 633618e3353f8953e43d989d08302f5dcd51d8be
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

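/*
 * Rounds n up to a power of two, clamped below at min.  For example,
 * next_power(1000, 16) returns 1024 and next_power(3, 16) returns 16.
 */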
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io mode
 * default as follows, and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
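
/*
 * With these defaults, 512 consecutive sequential ios flip the tracker
 * into sequential mode, and 4 random ios in a row flip it back.
 */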

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

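/*
 * Note: despite its dm_oblock_t type, last_end_oblock records the last
 * *sector* of the previous bio, so the sequential check below compares
 * sectors.
 */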
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		if (!list_empty(q->qs + i))
			return false;

	return true;
}

/*
 * Insert an entry at the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}

/*
 * Shifts all entries down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
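/*
 * e.g. the entry at ep->entries[3] always represents cblock 3, so
 * infer_cblock() below is just pointer arithmetic.
 */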
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del(&e->list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and a dirty queue, contains the currently
	 * active mappings, whereas the pre_cache tracks blocks that are
	 * being hit frequently and are potential candidates for promotion
	 * to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * bookkeeping effects, e.g. decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for promote_threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
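/*
 * nr_buckets is a power of two, so hash_bits (set in mq_create() as
 * ffs(nr_buckets) - 1) is log2(nr_buckets) and hash_64() yields a
 * bucket index in [0, nr_buckets).
 */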
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and when to move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
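/*
 * e.g. hit counts of 1, 2-3, 4-7, ... map to levels 0, 1, 2, ..., with
 * anything from 2^15 upwards saturating at level 15.
 */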
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from the pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

/*
 * Has this entry already been updated this tick?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}

/*
 * The promotion threshold is adjusted every generation, as are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold, helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* FIXME: divide? */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used clean entry from the cache, returning
 * the origin block it was mapped to in *oblock.
 *
 * There are various options for what to do with the demoted entry, and
 * more experimentation would be good:
 *
 * - just forget about it completely (ie. don't insert it into the
 *   pre_cache) - this is what the code below currently does.
 * - re-insert it into the pre_cache with its hit count dropped back to 1,
 *   to stop it bouncing straight back into the cache.
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1; eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * This stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * We modify the basic promote_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8
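
/*
 * Worked example: with promote_threshold at 8, a read of a non-discarded
 * block must have accumulated 8 + 4 = 12 hits in the pre_cache before it
 * is promoted, and a write 8 + 8 = 16 hits.
 */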

static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return DISCARDED_PROMOTE_THRESHOLD;
	}

	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}
	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	requeue_and_update_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir))
		result->op = POLICY_MISS;
	else if (!can_migrate)
		r = -EWOULDBLOCK;
	else
		r = pre_cache_to_cache(mq, e, result);

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks up the oblock in the hash table, then decides whether to put it
 * in the pre_cache, the cache, etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	kfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

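/*
 * Runtime tuning via the message interface, e.g. (assuming the cache
 * device is called "my-cache"):
 *
 *     dmsetup message my-cache 0 sequential_threshold 1024
 */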
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern = PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}

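/*
 * The leading "4" below is the number of key/value tokens that follow in
 * the emitted status line.
 */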
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

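	/*
	 * e.g. a cache of 65536 blocks gets 32768 hash buckets and a
	 * hash_bits of 15.
	 */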
	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);
bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");