/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
15
16#include <linux/err.h>
17#include <linux/of.h>
18#include <linux/of_fdt.h>
19#include <linux/of_platform.h>
20#include <linux/mm.h>
21#include <linux/sizes.h>
22#include <linux/of_reserved_mem.h>
23
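/*
 * Regions discovered while scanning the /reserved-memory node of the
 * flattened device tree are recorded in the static table below by
 * fdt_reserved_mem_save_node() and processed in a second pass by
 * fdt_init_reserved_mem().
 */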
#define MAX_RESERVED_REGIONS	16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

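/*
 * Default allocator for dynamically placed regions (regions described by a
 * "size" property). It is marked __weak so that an architecture can provide
 * its own early_init_dt_alloc_reserved_memory_arch() implementation.
 */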
#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	/*
	 * We use __memblock_alloc_base() because memblock_alloc_base()
	 * panic()s on allocation failure.
	 */
	phys_addr_t base = __memblock_alloc_base(size, align, end);
	if (!base)
		return -ENOMEM;

	/*
	 * Check if the allocated region fits into the start..end window
	 */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);
	return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	/* %pa prints a phys_addr_t correctly on both 32- and 64-bit */
	pr_err("Reserved memory not supported, ignoring region of size %pa%s\n",
	       &size, nomap ? " (nomap)" : "");
	return -ENOSYS;
}
#endif

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem;

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("Reserved memory: not enough space for all defined regions.\n");
		return;
	}

	rmem = &reserved_mem[reserved_mem_count++];
	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;
}

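/*
 * Illustrative only: a dynamically placed /reserved-memory child node of the
 * kind handled below might look like this (names and values are made up, and
 * a single address/size cell is assumed):
 *
 *	multimedia_pool: multimedia-pool {
 *		compatible = "shared-dma-pool";
 *		size = <0x4000000>;			// 64 MiB
 *		alignment = <0x100000>;			// 1 MiB
 *		alloc-ranges = <0x40000000 0x20000000>;	// allocate within this window
 *	};
 */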
/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	int nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("Reserved memory: invalid size property in '%s' node.\n",
			uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
					uname, &base,
					(unsigned long)size / SZ_1M);
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
				uname, &base, (unsigned long)size / SZ_1M);
	}

	if (base == 0) {
		pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
			uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}

/* Sentinel marking the end of the RESERVEDMEM_OF_DECLARE()-built table. */
static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		if (initfn(rmem) == 0) {
			pr_info("Reserved memory: initialized node %s, compatible id %s\n",
				rmem->name, compat);
			return 0;
		}
	}
	return -ENOENT;
}
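
/*
 * Illustrative only: region-specific init code is matched against the table
 * walked above by registering a handler with the RESERVEDMEM_OF_DECLARE()
 * macro from <linux/of_reserved_mem.h>. The names below are hypothetical.
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 */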

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 *
 * Expected to be called once during early boot, after the flattened device
 * tree has been scanned and memblock is available (in mainline it is called
 * from early_init_fdt_scan_reserved_mem()).
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;

		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						 &rmem->base, &rmem->size);
		if (err == 0)
			__reserved_mem_init_node(rmem);
	}
}

/* Look up the reserved_mem entry whose phandle matches the given node. */
static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

/**
 * of_reserved_mem_device_init() - assign reserved memory region to given device
 *
 * This function assigns the memory region pointed to by the "memory-region"
 * device tree property to the given device.
 */
int of_reserved_mem_device_init(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return -ENODEV;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0)
		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);

	return ret;
}

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
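
/*
 * Illustrative only: how a driver might consume a reserved region. The device
 * node, labels and driver callbacks below are hypothetical; the binding
 * property is the "memory-region" phandle looked up above.
 *
 *	// device tree
 *	mydev@12000000 {
 *		compatible = "vendor,mydev";
 *		memory-region = <&multimedia_pool>;
 *	};
 *
 *	// driver
 *	static int mydev_probe(struct platform_device *pdev)
 *	{
 *		int ret = of_reserved_mem_device_init(&pdev->dev);
 *		if (ret && ret != -ENODEV)
 *			return ret;	// region exists but could not be assigned
 *		...
 *	}
 *
 *	static int mydev_remove(struct platform_device *pdev)
 *	{
 *		of_reserved_mem_device_release(&pdev->dev);
 *		return 0;
 *	}
 */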