nouveau_gem.c revision 9a391ad8a2cdd7e5be9b6aabb56f4a46683ba377
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

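/* Debug hook: define to non-zero to wait on each submission's fence and
 * dump the push buffer contents if the wait fails.
 */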
#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

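/* GEM object destructor: releases any outstanding CPU access grab and
 * pin references before dropping the final TTM buffer reference.
 */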
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	bo = &nvbo->bo;
	if (unlikely(nvbo->cpu_filp))
		ttm_bo_synccpu_write_release(bo);

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);
}

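/* Allocate a TTM-backed buffer object and wrap it in a GEM object so it
 * can be handed out to userspace via a handle.
 */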
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

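/* Fill in the userspace info struct (domain, size, offset, map handle
 * and tiling state) for a buffer object.
 */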
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

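/* Whitelist of tile_flags values userspace is permitted to request. */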
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
	switch (tile_flags) {
	case 0x0000:
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7000:
	case 0x7400:
	case 0x7a00:
	case 0xe000:
		break;
	default:
		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
		return false;
	}

	return true;
}

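/* DRM_NOUVEAU_GEM_NEW: allocate a buffer object on behalf of userspace
 * and return a GEM handle plus placement information for it.
 */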
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(nvbo->gem);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		drm_gem_object_unreference(nvbo->gem);
	return ret;
}

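/* Translate the read/write domain masks supplied by userspace into a
 * TTM placement, preferring the buffer's current location for reads to
 * avoid needless migration.
 */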
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint64_t flags;

	if (!valid_domains || (!read_domains && !write_domains))
		return -EINVAL;

	if (write_domains) {
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
			flags = TTM_PL_FLAG_TT;
		else
			return -EINVAL;
	} else {
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    bo->mem.mem_type == TTM_PL_VRAM)
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    bo->mem.mem_type == TTM_PL_TT)
			flags = TTM_PL_FLAG_TT;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
			flags = TTM_PL_FLAG_TT;
	}

	nouveau_bo_placement_set(nvbo, flags);
	return 0;
}

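/* Per-submission bookkeeping: buffers are sorted onto a list according
 * to which memory domains they are allowed to be placed in.
 */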
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

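/* Tear down a validation list: attach the submission fence (if any) to
 * each buffer, then unreserve it and drop the lookup reference.
 */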
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

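/* Look up and reserve every buffer on the submission's list, retrying
 * from scratch if a reservation would deadlock against another client.
 */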
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
			drm_gem_object_unreference(gem);
			if (ret)
				return ret;
			goto retry;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
			validate_fini(op, NULL);

			if (nvbo->cpu_filp == file_priv) {
				NV_ERROR(dev, "bo %p mapped by process trying "
					      "to validate it!\n", nvbo);
				return -EINVAL;
			}

			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	return 0;
}

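/* Validate each reserved buffer into an acceptable placement, syncing
 * against fences from other channels, and copy updated presumed offsets
 * back to userspace.  Returns the number of buffers whose presumed
 * state was invalidated, or a negative error code.
 */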
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
			spin_lock(&nvbo->bo.lock);
			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.lock);
			if (unlikely(ret))
				return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret))
			return ret;

		nvbo->channel = chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false);
		nvbo->channel = NULL;
		if (unlikely(ret))
			return ret;

		if (nvbo->bo.offset == b->presumed_offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed_offset = nvbo->bo.offset;
		b->presumed_ok = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
			return -EFAULT;
	}

	return relocs;
}

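/* Reserve and validate all buffers referenced by a submission,
 * returning via *apply_relocs how many buffers had stale presumed
 * state and therefore need relocations applied.
 */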
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret))
		return ret;

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

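/* Copy an array of nmemb items of the given size in from userspace into
 * a kmalloc'd buffer.  Note that nmemb * size is not checked for
 * overflow here; callers are expected to have bounded nmemb against the
 * ioctl limits first.
 */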
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

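/* Patch relocations into a push buffer: each reloc rewrites one dword
 * with (part of) a buffer's final offset, optionally OR'ing in a
 * domain-dependent value.
 */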
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
				struct drm_nouveau_gem_pushbuf_bo *bo,
				unsigned nr_relocs, uint64_t ptr_relocs,
				unsigned nr_dwords, unsigned first_dword,
				uint32_t *pushbuf, bool is_iomem)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_device *dev = chan->dev;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		uint32_t data;

		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
		    r->reloc_index >= first_dword + nr_dwords) {
			NV_ERROR(dev, "Bad relocation %d\n", i);
			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index,
				 nr_dwords);
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed_ok)
			continue;

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed_offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed_offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		if (is_iomem)
			iowrite32_native(data, (void __force __iomem *)
						&pushbuf[r->reloc_index]);
		else
			pushbuf[r->reloc_index] = data;
	}

	kfree(reloc);
	return ret;
}

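/* DRM_NOUVEAU_GEM_PUSHBUF: validate the buffer list, apply relocations
 * to a kernel copy of the user-supplied push buffer, then feed it to
 * the channel's ring and fence the submission.
 */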
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	uint32_t *pushbuf = NULL;
	int ret = 0, do_reloc = 0, i;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	if (req->nr_dwords >= chan->dma.max ||
	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
			 chan->dma.max - 1);
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
	if (IS_ERR(pushbuf))
		return PTR_ERR(pushbuf);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(pushbuf);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret)
		goto out;

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
						      bo, req->nr_relocs,
						      req->relocs,
						      req->nr_dwords, 0,
						      pushbuf, false);
		if (ret)
			goto out;
	}

	/* Emit push buffer to the hw */
	ret = RING_SPACE(chan, req->nr_dwords);
	if (ret)
		goto out;

	OUT_RINGp(chan, pushbuf, req->nr_dwords);

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (nouveau_gem_pushbuf_sync(chan)) {
		ret = nouveau_fence_wait(fence, NULL, false, false);
		if (ret) {
			for (i = 0; i < req->nr_dwords; i++)
				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
		}
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void **)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(pushbuf);
	kfree(bo);
	return ret;
}

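/* Chips from NV20 on can CALL into a secondary push buffer and RETurn
 * from it; older chips only support JUMP, so a jump back to the main
 * ring has to be patched into the last dwords of the buffer instead.
 */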
#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)

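/* DRM_NOUVEAU_GEM_PUSHBUF_CALL: like PUSHBUF, but the push buffer is
 * itself a GEM object which the channel calls (or jumps) into directly
 * rather than being copied through the ring.
 */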
int
nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf_call *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct drm_gem_object *gem;
	struct nouveau_bo *pbbo;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, ret = 0, do_reloc = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	if (unlikely(req->handle == 0))
		goto out_next;

	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Validate DMA push buffer */
	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem) {
		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
		ret = -EINVAL;
		goto out;
	}
	pbbo = nouveau_gem_object(gem);

	if ((req->offset & 3) || req->nr_dwords < 2 ||
	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
	    (unsigned long)req->nr_dwords >
	     ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
		NV_ERROR(dev, "pb call misaligned or out of bounds: "
			      "%d + %d * 4 > %ld\n",
			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
		ret = -EINVAL;
		drm_gem_object_unreference(gem);
		goto out;
	}

	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
			     chan->fence.sequence);
	if (ret) {
		NV_ERROR(dev, "resv pb: %d\n", ret);
		drm_gem_object_unreference(gem);
		goto out;
	}

	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
	if (ret) {
		NV_ERROR(dev, "validate pb: %d\n", ret);
		ttm_bo_unreserve(&pbbo->bo);
		drm_gem_object_unreference(gem);
		goto out;
	}

	list_add_tail(&pbbo->entry, &op.both_list);

	/* If the presumed return address doesn't match, we need to map the
	 * push buffer and fix it..
	 */
	if (!PUSHBUF_CAL) {
		uint32_t retaddy;

		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
			ret = nouveau_dma_wait(chan, 0, 4 + NOUVEAU_DMA_SKIPS);
			if (ret) {
				NV_ERROR(dev, "jmp_space: %d\n", ret);
				goto out;
			}
		}

		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
		retaddy |= 0x20000000;
		if (retaddy != req->suffix0) {
			req->suffix0 = retaddy;
			do_reloc = 1;
		}
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		void *pbvirt;
		bool is_iomem;

		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
				  &pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "kmap pb: %d\n", ret);
			goto out;
		}

		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
						      req->nr_relocs,
						      req->relocs,
						      req->nr_dwords,
						      req->offset / 4,
						      pbvirt, is_iomem);

		if (!PUSHBUF_CAL) {
			nouveau_bo_wr32(pbbo,
					req->offset / 4 + req->nr_dwords - 2,
					req->suffix0);
		}

		ttm_bo_kunmap(&pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, 2, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		nv50_dma_push(chan, pbbo, req->offset, req->nr_dwords);
	} else
	if (PUSHBUF_CAL) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				  req->offset) | 2);
		OUT_RING(chan, 0);
	} else {
		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				  req->offset) | 0x20000000);
		OUT_RING(chan, 0);

		/* Space the jumps apart with NOPs. */
		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
			OUT_RING(chan, 0);
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void **)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (PUSHBUF_CAL) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}

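/* CALL2 is CALL plus a report of the currently available VRAM/GART. */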
int
nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf_call *req = data;

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;

	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
}

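/* Convert a GEM domain mask to TTM placement flags. */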
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

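/* DRM_NOUVEAU_GEM_PIN: root-only UMS path to pin a buffer at a fixed
 * offset; rejected when kernel modesetting is active.
 */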
int
nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pin *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
		return -EINVAL;
	}

	if (!DRM_SUSER(DRM_CURPROC))
		return -EPERM;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;
	nvbo = nouveau_gem_object(gem);

	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
	if (ret)
		goto out;

	req->offset = nvbo->bo.offset;
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		req->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

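/* DRM_NOUVEAU_GEM_UNPIN: UMS counterpart to the pin ioctl above. */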
int
nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pin *req = data;
	struct drm_gem_object *gem;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;

	ret = nouveau_bo_unpin(nouveau_gem_object(gem));

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

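/* DRM_NOUVEAU_GEM_CPU_PREP: wait for pending GPU access to finish and,
 * unless NOBLOCK is set, grab a synccpu write reservation so the GPU
 * stays off the buffer until CPU_FINI.
 */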
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		if (nvbo->cpu_filp == file_priv)
			goto out;

		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
		spin_unlock(&nvbo->bo.lock);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

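/* DRM_NOUVEAU_GEM_CPU_FINI: release the synccpu reservation taken by
 * CPU_PREP, if this file holds it.
 */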
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp != file_priv)
		goto out;
	nvbo->cpu_filp = NULL;

	ttm_bo_synccpu_write_release(&nvbo->bo);
	ret = 0;

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

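/* DRM_NOUVEAU_GEM_INFO: report placement and tiling info for a handle. */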
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;

	ret = nouveau_gem_info(gem, req);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}