
ion_system_heap.c revision ee4a4986d1a5998ada72c805c040daf759b687be
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

/*
 * High-order allocations are opportunistic: don't wake kswapd, don't retry
 * and don't warn on failure, since we can always fall back to lower orders.
 */
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD);
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
/* allocation orders tried, largest first; one page pool exists per order */
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	/* one pool per entry in orders[] */
	struct ion_page_pool **pools;
};

/* tracks one allocated chunk while the buffer's scatterlist is being built */
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		/* uncached buffers come from the per-order page pool */
		page = ion_page_pool_alloc(pool);
	} else {
		/* cached buffers bypass the pool and hit the page allocator */
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
	}
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot so
		 * the writes occur to noncached mappings, as the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	/* grab the largest chunks available until the request is satisfied */
	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	/* each info is freed while walking, so use the _safe iterator */
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	/* the table is never dma-mapped here, so use the CPU length */
	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/* flatten the scatterlist into a page array and vmap it */
	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int ret;

		if (offset) {
			offset--;
			continue;
		}
		ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				      sg->length, vma->vm_page_prot);
		if (ret)
			return ret;
		addr += sg->length;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u pages in pool = %lu total\n",
			   pool->count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->count);
	}
	return 0;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}