
nouveau_chan.c revision f45f55c4bbf8a9f9c607e5f6013abac60427e3f7
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

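/* vram_pushbuf=1 places DMA push buffers in VRAM; the default (0) uses
 * system memory mapped through the GART.
 */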
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
static int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

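/* Wait for all work previously submitted to the channel to complete, by
 * emitting a new fence on the channel and waiting for it to signal.
 */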
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
			  chan->object->handle, nvkm_client(&cli->base)->name);
	return ret;
}

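/* Tear down a channel: idle it first so any in-flight work completes, then
 * release its resources in roughly the reverse order of their creation.
 */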
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		if (chan->fence) {
			nouveau_channel_idle(chan);
			nouveau_fence(chan->drm)->context_del(chan);
		}
		nvif_object_fini(&chan->nvsw);
		nvif_object_fini(&chan->gart);
		nvif_object_fini(&chan->vram);
		nvif_object_ref(NULL, &chan->object);
		nvif_object_fini(&chan->push.ctxdma);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		nvif_device_ref(NULL, &chan->device);
		kfree(chan);
	}
	*pchan = NULL;
}

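/* Common channel setup: allocate and map the DMA push buffer, then create
 * a ctxdma object through which the hardware can reach it.
 */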
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 handle, u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_instmem *imem = nvkm_instmem(device);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nouveau_fb *pfb = nvkm_fb(device);
	struct nv_dma_class args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	nvif_device_ref(device, &chan->device);
	chan->drm = drm;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					&chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->vmm->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		u64 limit = pfb->ram->size - imem->reserved - 1;
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
			args.start = nv_device_resource_start(nvkm_device(device), 1);
			args.limit = args.start + limit;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = limit;
		}
	} else {
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}
	}

	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
			       (handle & 0xffff), NV_DMA_FROM_MEMORY_CLASS,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

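/* Create a channel that fetches commands through an indirect buffer (IB)
 * ring, trying the newest supported channel class first.
 */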
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 engine, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
					NVC0_CHANNEL_IND_CLASS,
					NV84_CHANNEL_IND_CLASS,
					NV50_CHANNEL_IND_CLASS,
					0 };
	const u16 *oclass = oclasses;
	struct nve0_channel_ind_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.ctxdma.handle;
	args.ioffset = 0x10000 + chan->push.vma.offset;
	args.ilength = 0x02000;
	args.engine  = engine;

	do {
		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
				     &args, sizeof(args), &chan->object);
		if (ret == 0)
			return ret;
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}

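/* Create a channel with a plain DMA-mode push buffer (pre-NV50 designs),
 * again trying the newest supported class first.
 */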
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
					NV17_CHANNEL_DMA_CLASS,
					NV10_CHANNEL_DMA_CLASS,
					NV03_CHANNEL_DMA_CLASS,
					0 };
	const u16 *oclass = oclasses;
	struct nv03_channel_dma_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.ctxdma.handle;
	args.offset = chan->push.vma.offset;

	do {
		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
				     &args, sizeof(args), &chan->object);
		if (ret == 0)
			return ret;
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}

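/* Finish bringing up a freshly created channel: expose vram/gart ctxdmas
 * (pre-Fermi), set up DMA ring tracking, and hook up the software object
 * used for fences on the earliest chips.
 */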
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_instmem *imem = nvkm_instmem(device);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nouveau_fb *pfb = nvkm_fb(device);
	struct nouveau_software_chan *swch;
	struct nv_dma_class args = {};
	int ret, i;

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = pfb->ram->size - imem->reserved - 1;
		}

		ret = nvif_object_init(chan->object, NULL, vram,
				       NV_DMA_IN_MEMORY_CLASS, &args,
				       sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}

		ret = nvif_object_init(chan->object, NULL, gart,
				       NV_DMA_IN_MEMORY_CLASS, &args,
				       sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
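	/* the 0x6b/0x6e cases cover the DMA-mode channel classes; the
	 * default covers the IB-mode classes, whose ring was placed at
	 * +0x10000 in the push buffer by nouveau_channel_ind()
	 */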
	switch (chan->object->oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

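	/* reserve and zero NOUVEAU_DMA_SKIPS words of padding at the
	 * start of the push buffer
	 */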
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		swch = (void *)nvkm_object(&chan->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = chan;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}

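/* Top-level channel constructor: try to create an IB channel first and
 * fall back to a DMA channel if that fails.  arg0 doubles as the IB
 * engine selector and the vram ctxdma handle; arg1 is the gart ctxdma
 * handle.
 */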
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 arg0, u32 arg1,
		    struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	int ret;

	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
	if (ret) {
		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, handle, pchan);
		if (ret) {
			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}