/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

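/*
 * Allocate the backing storage for a GEM buffer. Depending on the buffer
 * flags, the region is forced to be physically contiguous and is mapped
 * write-combined or cacheable; the backing pages are collected into an
 * sg table. Without an IOMMU the pages of the contiguous DMA region are
 * looked up via phys_to_page(); with an IOMMU dma_alloc_attrs() returns
 * the page array directly.
 */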
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise memory is allocated as
	 * physically contiguous as possible.
	 */
	if (!(flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
					buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			ret = -ENOMEM;
			goto err_free;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (IS_ERR(buf->sgt)) {
		DRM_ERROR("failed to get sg table.\n");
		ret = PTR_ERR(buf->sgt);
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->pages,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;
err_free:
	if (!is_drm_iommu_supported(dev))
		drm_free_large(buf->pages);

	return ret;
}

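/*
 * Release the backing storage of a GEM buffer: free the sg table and
 * return the DMA memory (and, in the non-IOMMU case, the page array)
 * allocated by lowlevel_buffer_allocate().
 */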
static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		drm_free_large(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;
}

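/*
 * Create a buffer object of the requested size. Only the bookkeeping
 * structure is allocated here; the backing memory is set up separately
 * by exynos_drm_alloc_buf().
 */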
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
						unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->size = size;
	return buffer;
}

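/*
 * Free a buffer object created by exynos_drm_init_buf().
 */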
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	kfree(buffer);
	buffer = NULL;
}

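/*
 * Allocate the backing memory for a buffer object according to the given
 * flags (contiguity and caching attributes).
 */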
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{

	/*
	 * Allocate the memory region and store the memory information
	 * in the kvaddr and dma_addr fields of the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

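/*
 * Release the backing memory of a buffer object.
 */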
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{

	lowlevel_buffer_deallocate(dev, flags, buffer);
}