base.c revision d005f51eb93d71cd40ebd11dd377453fa8c8a42a
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>

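/* Map the memory regions listed in node->regions into this vma, starting at
 * byte offset 'delta'.  Each region is carved up at page-table boundaries,
 * handed to the vmm backend's map() hook, and the VM is flushed once
 * everything has been written.
 */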
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vmm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vmm->flush(vm);
}

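/* Map the whole memory object at the beginning of the vma. */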
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

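/* Map a scatter-gather table into the vma.  Each segment of mem->sg is
 * written one PAGE_SIZE page at a time through the backend's map_sg() hook,
 * bumping the PDE/PTE counters across page-table boundaries, until the
 * requested length has been mapped.
 */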
void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
			struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
	vmm->flush(vm);
}

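/* Map a flat array of DMA addresses (mem->pages) into the vma, handing
 * page-table-sized runs to the backend's map_sg() hook.
 */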
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

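/* Clear the PTEs covering 'length' bytes of the vma starting at byte offset
 * 'delta', one page table at a time, then flush the VM.
 */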
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

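/* Unmap the entire range covered by the vma. */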
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

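/* Drop one reference on each page table in the PDE range [fpde, lpde].
 * Tables whose refcount reaches zero are cleared from every page directory
 * linked to the VM and then released.  Called with the vmmgr subdev mutex
 * held; the mutex is dropped temporarily around the gpuobj release.
 */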
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&nv_subdev(vmm)->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&nv_subdev(vmm)->mutex);
	}
}

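/* Allocate the page table backing a single PDE (one 8-byte PTE per page of
 * the chosen size) and point every linked page directory at it.  Called with
 * the vmmgr subdev mutex held; the mutex is dropped around the allocation,
 * so the case where another thread populated the PDE in the meantime is
 * handled as well.
 */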
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vmm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&nv_subdev(vmm)->mutex);
	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&nv_subdev(vmm)->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&nv_subdev(vmm)->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&nv_subdev(vmm)->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

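/* Allocate 'size' bytes of address space from the VM, aligned to the
 * requested page size, make sure page tables exist for every PDE the range
 * touches (taking a reference on each), and initialise the caller's vma.
 * On failure, any page-table references taken so far are released again.
 */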
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&nv_subdev(vmm)->mutex);
	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
			      &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&nv_subdev(vmm)->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vmm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&nv_subdev(vmm)->mutex);
			return ret;
		}
	}
	mutex_unlock(&nv_subdev(vmm)->mutex);

	vma->vm = NULL;
	nouveau_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

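/* Release a range obtained from nouveau_vm_get(): drop the page-table
 * references for the covered PDEs, return the address space to the
 * allocator, and drop the vma's reference on the VM.
 */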
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	mutex_lock(&nv_subdev(vmm)->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
	nouveau_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&nv_subdev(vmm)->mutex);

	nouveau_vm_ref(NULL, &vma->vm, NULL);
}

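/* Allocate and initialise a VM covering [offset, offset + length): compute
 * the first/last PDE from the vmmgr's page-table size, allocate the
 * page-table tracking array, and set up the address-space allocator over
 * [mm_offset, offset + length) in 4KiB units with 'block' granularity.
 */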
int
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->vmm = vmm;
	vm->refcount = 1;
	vm->fpde = offset >> (vmm->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		vfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
	       u64 mm_offset, struct nouveau_vm **pvm)
{
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	return vmm->create(vmm, offset, length, mm_offset, pvm);
}

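/* Attach a page directory to the VM: take a reference on it, write the VM's
 * current page tables into it, and add it to the VM's pgd_list.
 */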
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&nv_subdev(vmm)->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&nv_subdev(vmm)->mutex);
	return 0;
}

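/* Detach a page directory from the VM and drop the reference taken by
 * nouveau_vm_link().  The reference is released outside the mutex.
 */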
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&nv_subdev(vmm)->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&nv_subdev(vmm)->mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}

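/* Final teardown once the last reference is gone: unlink any remaining page
 * directories, then free the allocator, the page-table array and the VM.
 */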
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	vfree(vm->pgt);
	kfree(vm);
}

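/* Reference-counted assignment of a VM pointer.  Taking the new reference
 * may also link a page directory; dropping the old one unlinks it, and the
 * VM is destroyed when its refcount reaches zero.
 */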
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}