/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

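/*
 * Translate a ttm_place's memory-type flag bits into a memory type index
 * (TTM_PL_SYSTEM .. TTM_PL_PRIV5). Returns -EINVAL if no type bit is set.
 */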
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

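/*
 * Final release of a bo's list reference: sanity-check that the object is
 * off all lists, destroy its ttm, drop its private reservation object and
 * free the accounted memory. Called via kref_put() on list_kref.
 */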
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

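/*
 * Remove the bo from the global swap list and from its manager's LRU list.
 * Returns the number of list references dropped, which the caller must
 * release via ttm_bo_list_ref_sub() once the lru_lock has been dropped.
 */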
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call with the bo reserved (bo->resv held).
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
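		/* fall through: device bos also need a ttm backing store */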
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

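/*
 * Move the buffer into the memory region described by @mem: unmap CPU
 * mappings when the aperture or caching changes, create and bind a ttm when
 * the new type is not fixed, then hand off to ttm_bo_move_ttm(), the
 * driver's move hook or ttm_bo_move_memcpy() as appropriate.
 */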
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call with the bo reserved (bo::resv held).
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

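/*
 * Enable software signaling on the exclusive fence and on all shared
 * fences of the bo's reservation object, so that a bo queued for delayed
 * destruction can become idle without anyone explicitly waiting on it.
 */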
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

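/*
 * If the bo is already idle and can be reserved without blocking, tear
 * down its memory type usage immediately. Otherwise enable fence signaling
 * and queue the bo on the device's delayed-destroy list for the delayed
 * workqueue to reap later.
 */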
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock - clean up a delayed-destroy bo if idle
 * @bo:            The buffer object to clean up.
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu:   Never wait for gpu. Return -EBUSY instead.
 *
 * If the bo is idle, remove it from the delayed-destroy and lru lists and
 * drop the list references. If it is not idle, do nothing.
 *
 * Must be called with lru_lock and the reservation held; this function
 * will drop both before returning.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

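/*
 * Evict the first evictable buffer found on the LRU list of @mem_type.
 * When @place restricts the range, buffers entirely outside the requested
 * pfn range are skipped rather than evicted.
 */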
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				const struct ttm_place *place,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

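/*
 * Pick the caching flags for the new placement: prefer the bo's current
 * caching, then the manager's default caching, then cached, write-combined
 * and uncached, restricted to what the proposed placement allows.
 */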
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

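/*
 * Check whether memory type @mem_type can satisfy the requested @place,
 * i.e. the type bit and at least one caching mode match. On success the
 * combined placement flags are returned through @masked_placement.
 */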
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, place, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

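/*
 * Check whether the bo's current memory region already satisfies one of
 * the requested placements (pfn range and caching/memory-type flags).
 * If so, return the matching placement flags through @new_flags.
 */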
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv)
		ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
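
/*
 * Illustrative usage sketch (not part of this file): a driver typically
 * embeds a ttm_buffer_object in its own bo structure, sizes the accounting
 * with ttm_bo_acc_size() and then calls ttm_bo_init(). Names such as
 * my_bo, my_placement and my_destroy are hypothetical.
 *
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(*my_bo));
 *
 *	ret = ttm_bo_init(bdev, &my_bo->tbo, size, ttm_bo_type_device,
 *			  &my_placement, 0, true, NULL, acc_size,
 *			  NULL, NULL, my_destroy);
 *	if (ret)
 *		return ret;
 */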

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

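/*
 * Evict everything on a memory type's LRU list. Used when disabling or
 * tearing down a memory manager; with @allow_errors the first eviction
 * failure is returned to the caller instead of merely being logged.
 */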
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

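/*
 * Wait for the exclusive and shared fences on the bo's reservation object
 * to signal, or fail fast with -EBUSY when @no_wait is set. On success the
 * exclusive fence slot is cleared and TTM_BO_PRIV_FLAG_MOVING is reset.
 */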
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, false, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	ret = ttm_bo_wait(bo, false, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}
