
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

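/*
 * Once the data has been written to the cache, journal the keys and insert
 * them into the btree. A replace (cache miss) insertion skips the journal;
 * -ESRCH from the btree insert means the replace key collided with a racing
 * write and the insert was dropped.
 */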
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, op->wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

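/*
 * Grow op->insert_keys by u64s, but never beyond what fits in a single
 * (empty) journal write; see the comment below for why we cap the size
 * rather than letting the keylist grow unbounded.
 */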
static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

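/*
 * Bypass path: instead of writing the data, emit zero-pointer keys that
 * invalidate the corresponding region of the cache, chunking the bio into
 * keys of at most 1U << (KEY_SIZE_BITS - 1) sectors each.
 */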
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

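/*
 * Completion for a single data write to the cache. On error, writeback
 * writes report the error to the caller; writethrough/miss inserts are
 * redirected to bch_data_insert_error() so the keys get their pointers
 * stripped; replace inserts are simply abandoned.
 */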
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

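/*
 * Allocate space in the cache and submit the data writes, one allocation's
 * worth at a time; each fragment gets its own key. If the keylist can't be
 * grown any further, we insert what we have so far and resume here once
 * it's been emptied.
 */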
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, op->wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

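/*
 * Turn the cache set's recent congestion into a sector threshold: I/Os
 * whose sequential run meets or exceeds the returned value get bypassed
 * (see check_should_bypass()). The raw congestion count decays with time
 * since the last update (roughly one count per millisecond), is rescaled
 * with fract_exp_two(), and a little randomness is mixed in so the cutoff
 * isn't a hard edge.
 */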
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

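/*
 * Decide whether this bio should skip the cache entirely. We bypass when
 * the device is detaching, the cache is nearly full (CUTOFF_CACHE_ADD),
 * the cache mode excludes this I/O, the I/O is unaligned, or the I/O looks
 * sequential: recent I/Os are tracked in a small hash table (dc->io_hash)
 * keyed by the sector where the previous I/O ended, so consecutive bios
 * chain into a single sequential run.
 */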
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error, so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio())
	 * if the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

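/*
 * Map cache_lookup_fn over all btree keys that overlap the bio, retrying
 * from the workqueue if the btree walk returns -EAGAIN.
 */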
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

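/*
 * (Re)initialize the search's embedded bio as a clone of orig_bio, pinned
 * with extra references so the bio_put() calls on the split/resubmission
 * paths that follow don't free it prematurely.
 */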
static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

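/*
 * Handle the part of a read the cache couldn't satisfy: submit the miss to
 * the backing device and, unless we're bypassing, also allocate a second
 * bio (cache_bio, possibly extended by readahead) whose contents will be
 * inserted into the cache once the read completes. The replace key
 * reserves that region, so a racing write causes our insert to be dropped
 * rather than clobbering newer data.
 */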
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

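/*
 * Route a write according to cache mode and overlap with in-flight
 * writeback: bypassed writes go straight to the backing device, writeback
 * writes go only to the cache (as dirty data), and writethrough writes are
 * cloned so the cache and the backing device can be written in parallel.
 */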
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

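/*
 * make_request entry point for a cached device: account the I/O, remap the
 * bio to the backing device, then hand it off to the read or write path.
 * If we can't get a ref on the cached_dev (e.g. it's going away), the bio
 * is passed straight through to the backing device.
 */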
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

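/*
 * A flash-only volume has no backing device, so a "miss" is simply a hole:
 * zero-fill the unbacked part of the bio and advance past it.
 */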
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}