/*
 * Snapshot of exynos_tmu.c at revision
 * ddb31d43cb20222f929b0d242bdb516de51b6c23.
 */
1/*
2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3 *
4 *  Copyright (C) 2011 Samsung Electronics
5 *  Donggeun Kim <dg77.kim@samsung.com>
6 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24#include <linux/clk.h>
25#include <linux/io.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h>
33
34#include "exynos_thermal_common.h"
35#include "exynos_tmu.h"
36#include "exynos_tmu_data.h"
37
38/**
39 * struct exynos_tmu_data : A structure to hold the private data of the TMU
40	driver
41 * @id: identifier of the one instance of the TMU controller.
42 * @pdata: pointer to the tmu platform/configuration data
43 * @base: base address of the single instance of the TMU controller.
44 * @base_second: base address of the common registers of the TMU controller.
45 * @irq: irq number of the TMU controller.
46 * @soc: id of the SOC type.
47 * @irq_work: pointer to the irq work structure.
48 * @lock: lock to implement synchronization.
49 * @clk: pointer to the clock structure.
50 * @clk_sec: pointer to the clock structure for accessing the base_second.
51 * @temp_error1: fused value of the first point trim.
52 * @temp_error2: fused value of the second point trim.
53 * @regulator: pointer to the TMU regulator structure.
54 * @reg_conf: pointer to structure to register with core thermal.
55 */
56struct exynos_tmu_data {
57	int id;
58	struct exynos_tmu_platform_data *pdata;
59	void __iomem *base;
60	void __iomem *base_second;
61	int irq;
62	enum soc_type soc;
63	struct work_struct irq_work;
64	struct mutex lock;
65	struct clk *clk, *clk_sec;
66	u8 temp_error1, temp_error2;
67	struct regulator *regulator;
68	struct thermal_sensor_conf *reg_conf;
69};
70
71/*
72 * TMU treats temperature as a mapped temperature code.
73 * The temperature is converted differently depending on the calibration type.
74 */
75static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
76{
77	struct exynos_tmu_platform_data *pdata = data->pdata;
78	int temp_code;
79
80	switch (pdata->cal_type) {
81	case TYPE_TWO_POINT_TRIMMING:
82		temp_code = (temp - pdata->first_point_trim) *
83			(data->temp_error2 - data->temp_error1) /
84			(pdata->second_point_trim - pdata->first_point_trim) +
85			data->temp_error1;
86		break;
87	case TYPE_ONE_POINT_TRIMMING:
88		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
89		break;
90	default:
91		temp_code = temp + pdata->default_temp_offset;
92		break;
93	}
94
95	return temp_code;
96}
97
98/*
99 * Calculate a temperature value from a temperature code.
100 * The unit of the temperature is degree Celsius.
101 */
102static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
103{
104	struct exynos_tmu_platform_data *pdata = data->pdata;
105	int temp;
106
107	switch (pdata->cal_type) {
108	case TYPE_TWO_POINT_TRIMMING:
109		temp = (temp_code - data->temp_error1) *
110			(pdata->second_point_trim - pdata->first_point_trim) /
111			(data->temp_error2 - data->temp_error1) +
112			pdata->first_point_trim;
113		break;
114	case TYPE_ONE_POINT_TRIMMING:
115		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
116		break;
117	default:
118		temp = temp_code - pdata->default_temp_offset;
119		break;
120	}
121
122	return temp;
123}
124
125static int exynos_tmu_initialize(struct platform_device *pdev)
126{
127	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
128	struct exynos_tmu_platform_data *pdata = data->pdata;
129	const struct exynos_tmu_registers *reg = pdata->registers;
130	unsigned int status, trim_info = 0, con;
131	unsigned int rising_threshold = 0, falling_threshold = 0;
132	int ret = 0, threshold_code, i, trigger_levs = 0;
133
134	mutex_lock(&data->lock);
135	clk_enable(data->clk);
136	if (!IS_ERR(data->clk_sec))
137		clk_enable(data->clk_sec);
138
139	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
140		status = readb(data->base + reg->tmu_status);
141		if (!status) {
142			ret = -EBUSY;
143			goto out;
144		}
145	}
146
147	if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
148		__raw_writel(1, data->base + reg->triminfo_ctrl);
149
150	/* Save trimming info in order to perform calibration */
151	if (data->soc == SOC_ARCH_EXYNOS5440) {
152		/*
153		 * For exynos5440 soc triminfo value is swapped between TMU0 and
154		 * TMU2, so the below logic is needed.
155		 */
156		switch (data->id) {
157		case 0:
158			trim_info = readl(data->base +
159			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
160			break;
161		case 1:
162			trim_info = readl(data->base + reg->triminfo_data);
163			break;
164		case 2:
165			trim_info = readl(data->base -
166			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
167		}
168	} else {
169		/* On exynos5420 the triminfo register is in the shared space */
170		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
171			trim_info = readl(data->base_second +
172							reg->triminfo_data);
173		else
174			trim_info = readl(data->base + reg->triminfo_data);
175	}
176	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
177	data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
178				EXYNOS_TMU_TEMP_MASK);
179
180	if (!data->temp_error1 ||
181		(pdata->min_efuse_value > data->temp_error1) ||
182		(data->temp_error1 > pdata->max_efuse_value))
183		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
184
185	if (!data->temp_error2)
186		data->temp_error2 =
187			(pdata->efuse_value >> reg->triminfo_85_shift) &
188			EXYNOS_TMU_TEMP_MASK;
189
190	for (i = 0; i < pdata->max_trigger_level; i++) {
191		if (!pdata->trigger_levels[i])
192			continue;
193
194		/* Count trigger levels except the HW trip*/
195		if (!(pdata->trigger_type[i] == HW_TRIP))
196			trigger_levs++;
197	}
198
199	rising_threshold = readl(data->base + reg->threshold_th0);
200
201	if (data->soc == SOC_ARCH_EXYNOS4210) {
202		/* Write temperature code for threshold */
203		threshold_code = temp_to_code(data, pdata->threshold);
204		writeb(threshold_code,
205			data->base + reg->threshold_temp);
206		for (i = 0; i < trigger_levs; i++)
207			writeb(pdata->trigger_levels[i], data->base +
208			reg->threshold_th0 + i * sizeof(reg->threshold_th0));
209
210		writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
211	} else {
212		/* Write temperature code for rising and falling threshold */
213		for (i = 0;
214		i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
215			threshold_code = temp_to_code(data,
216						pdata->trigger_levels[i]);
217			rising_threshold &= ~(0xff << 8 * i);
218			rising_threshold |= threshold_code << 8 * i;
219			if (pdata->threshold_falling) {
220				threshold_code = temp_to_code(data,
221						pdata->trigger_levels[i] -
222						pdata->threshold_falling);
223				falling_threshold |= threshold_code << 8 * i;
224			}
225		}
226
227		writel(rising_threshold,
228				data->base + reg->threshold_th0);
229		writel(falling_threshold,
230				data->base + reg->threshold_th1);
231
232		writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
233			(reg->intclr_fall_mask << reg->intclr_fall_shift),
234				data->base + reg->tmu_intclear);
235
236		/* if last threshold limit is also present */
237		i = pdata->max_trigger_level - 1;
238		if (pdata->trigger_levels[i] &&
239				(pdata->trigger_type[i] == HW_TRIP)) {
240			threshold_code = temp_to_code(data,
241						pdata->trigger_levels[i]);
242			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
243				/* 1-4 level to be assigned in th0 reg */
244				rising_threshold &= ~(0xff << 8 * i);
245				rising_threshold |= threshold_code << 8 * i;
246				writel(rising_threshold,
247					data->base + reg->threshold_th0);
248			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
249				/* 5th level to be assigned in th2 reg */
250				rising_threshold =
251				threshold_code << reg->threshold_th3_l0_shift;
252				writel(rising_threshold,
253					data->base + reg->threshold_th2);
254			}
255			con = readl(data->base + reg->tmu_ctrl);
256			con |= (1 << reg->therm_trip_en_shift);
257			writel(con, data->base + reg->tmu_ctrl);
258		}
259	}
260	/*Clear the PMIN in the common TMU register*/
261	if (reg->tmu_pmin && !data->id)
262		writel(0, data->base_second + reg->tmu_pmin);
263out:
264	clk_disable(data->clk);
265	mutex_unlock(&data->lock);
266	if (!IS_ERR(data->clk_sec))
267		clk_disable(data->clk_sec);
268
269	return ret;
270}
271
272static void exynos_tmu_control(struct platform_device *pdev, bool on)
273{
274	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
275	struct exynos_tmu_platform_data *pdata = data->pdata;
276	const struct exynos_tmu_registers *reg = pdata->registers;
277	unsigned int con, interrupt_en;
278
279	mutex_lock(&data->lock);
280	clk_enable(data->clk);
281
282	con = readl(data->base + reg->tmu_ctrl);
283
284	if (pdata->test_mux)
285		con |= (pdata->test_mux << reg->test_mux_addr_shift);
286
287	if (pdata->reference_voltage) {
288		con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
289		con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
290	}
291
292	if (pdata->gain) {
293		con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
294		con |= (pdata->gain << reg->buf_slope_sel_shift);
295	}
296
297	if (pdata->noise_cancel_mode) {
298		con &= ~(reg->therm_trip_mode_mask <<
299					reg->therm_trip_mode_shift);
300		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
301	}
302
303	if (on) {
304		con |= (1 << reg->core_en_shift);
305		interrupt_en =
306			pdata->trigger_enable[3] << reg->inten_rise3_shift |
307			pdata->trigger_enable[2] << reg->inten_rise2_shift |
308			pdata->trigger_enable[1] << reg->inten_rise1_shift |
309			pdata->trigger_enable[0] << reg->inten_rise0_shift;
310		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
311			interrupt_en |=
312				interrupt_en << reg->inten_fall0_shift;
313	} else {
314		con &= ~(1 << reg->core_en_shift);
315		interrupt_en = 0; /* Disable all interrupts */
316	}
317	writel(interrupt_en, data->base + reg->tmu_inten);
318	writel(con, data->base + reg->tmu_ctrl);
319
320	clk_disable(data->clk);
321	mutex_unlock(&data->lock);
322}
323
324static int exynos_tmu_read(struct exynos_tmu_data *data)
325{
326	struct exynos_tmu_platform_data *pdata = data->pdata;
327	const struct exynos_tmu_registers *reg = pdata->registers;
328	u8 temp_code;
329	int temp;
330
331	mutex_lock(&data->lock);
332	clk_enable(data->clk);
333
334	temp_code = readb(data->base + reg->tmu_cur_temp);
335
336	if (data->soc == SOC_ARCH_EXYNOS4210)
337		/* temp_code should range between 75 and 175 */
338		if (temp_code < 75 || temp_code > 175) {
339			temp = -ENODATA;
340			goto out;
341		}
342
343	temp = code_to_temp(data, temp_code);
344out:
345	clk_disable(data->clk);
346	mutex_unlock(&data->lock);
347
348	return temp;
349}
350
351#ifdef CONFIG_THERMAL_EMULATION
352static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
353{
354	struct exynos_tmu_data *data = drv_data;
355	struct exynos_tmu_platform_data *pdata = data->pdata;
356	const struct exynos_tmu_registers *reg = pdata->registers;
357	unsigned int val;
358	int ret = -EINVAL;
359
360	if (!TMU_SUPPORTS(pdata, EMULATION))
361		goto out;
362
363	if (temp && temp < MCELSIUS)
364		goto out;
365
366	mutex_lock(&data->lock);
367	clk_enable(data->clk);
368
369	val = readl(data->base + reg->emul_con);
370
371	if (temp) {
372		temp /= MCELSIUS;
373
374		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
375			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
376			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
377		}
378		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
379		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
380			EXYNOS_EMUL_ENABLE;
381	} else {
382		val &= ~EXYNOS_EMUL_ENABLE;
383	}
384
385	writel(val, data->base + reg->emul_con);
386
387	clk_disable(data->clk);
388	mutex_unlock(&data->lock);
389	return 0;
390out:
391	return ret;
392}
393#else
394static int exynos_tmu_set_emulation(void *drv_data,	unsigned long temp)
395	{ return -EINVAL; }
396#endif/*CONFIG_THERMAL_EMULATION*/
397
398static void exynos_tmu_work(struct work_struct *work)
399{
400	struct exynos_tmu_data *data = container_of(work,
401			struct exynos_tmu_data, irq_work);
402	struct exynos_tmu_platform_data *pdata = data->pdata;
403	const struct exynos_tmu_registers *reg = pdata->registers;
404	unsigned int val_irq, val_type;
405
406	if (!IS_ERR(data->clk_sec))
407		clk_enable(data->clk_sec);
408	/* Find which sensor generated this interrupt */
409	if (reg->tmu_irqstatus) {
410		val_type = readl(data->base_second + reg->tmu_irqstatus);
411		if (!((val_type >> data->id) & 0x1))
412			goto out;
413	}
414	if (!IS_ERR(data->clk_sec))
415		clk_disable(data->clk_sec);
416
417	exynos_report_trigger(data->reg_conf);
418	mutex_lock(&data->lock);
419	clk_enable(data->clk);
420
421	/* TODO: take action based on particular interrupt */
422	val_irq = readl(data->base + reg->tmu_intstat);
423	/* clear the interrupts */
424	writel(val_irq, data->base + reg->tmu_intclear);
425
426	clk_disable(data->clk);
427	mutex_unlock(&data->lock);
428out:
429	enable_irq(data->irq);
430}
431
432static irqreturn_t exynos_tmu_irq(int irq, void *id)
433{
434	struct exynos_tmu_data *data = id;
435
436	disable_irq_nosync(irq);
437	schedule_work(&data->irq_work);
438
439	return IRQ_HANDLED;
440}
441
442static const struct of_device_id exynos_tmu_match[] = {
443	{
444		.compatible = "samsung,exynos3250-tmu",
445		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
446	},
447	{
448		.compatible = "samsung,exynos4210-tmu",
449		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
450	},
451	{
452		.compatible = "samsung,exynos4412-tmu",
453		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
454	},
455	{
456		.compatible = "samsung,exynos5250-tmu",
457		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
458	},
459	{
460		.compatible = "samsung,exynos5260-tmu",
461		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
462	},
463	{
464		.compatible = "samsung,exynos5420-tmu",
465		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
466	},
467	{
468		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
469		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
470	},
471	{
472		.compatible = "samsung,exynos5440-tmu",
473		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
474	},
475	{},
476};
477MODULE_DEVICE_TABLE(of, exynos_tmu_match);
478
479static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
480			struct platform_device *pdev, int id)
481{
482	struct  exynos_tmu_init_data *data_table;
483	struct exynos_tmu_platform_data *tmu_data;
484	const struct of_device_id *match;
485
486	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
487	if (!match)
488		return NULL;
489	data_table = (struct exynos_tmu_init_data *) match->data;
490	if (!data_table || id >= data_table->tmu_count)
491		return NULL;
492	tmu_data = data_table->tmu_data;
493	return (struct exynos_tmu_platform_data *) (tmu_data + id);
494}
495
496static int exynos_map_dt_data(struct platform_device *pdev)
497{
498	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
499	struct exynos_tmu_platform_data *pdata;
500	struct resource res;
501	int ret;
502
503	if (!data || !pdev->dev.of_node)
504		return -ENODEV;
505
506	/*
507	 * Try enabling the regulator if found
508	 * TODO: Add regulator as an SOC feature, so that regulator enable
509	 * is a compulsory call.
510	 */
511	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
512	if (!IS_ERR(data->regulator)) {
513		ret = regulator_enable(data->regulator);
514		if (ret) {
515			dev_err(&pdev->dev, "failed to enable vtmu\n");
516			return ret;
517		}
518	} else {
519		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
520	}
521
522	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
523	if (data->id < 0)
524		data->id = 0;
525
526	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
527	if (data->irq <= 0) {
528		dev_err(&pdev->dev, "failed to get IRQ\n");
529		return -ENODEV;
530	}
531
532	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
533		dev_err(&pdev->dev, "failed to get Resource 0\n");
534		return -ENODEV;
535	}
536
537	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
538	if (!data->base) {
539		dev_err(&pdev->dev, "Failed to ioremap memory\n");
540		return -EADDRNOTAVAIL;
541	}
542
543	pdata = exynos_get_driver_data(pdev, data->id);
544	if (!pdata) {
545		dev_err(&pdev->dev, "No platform init data supplied.\n");
546		return -ENODEV;
547	}
548	data->pdata = pdata;
549	/*
550	 * Check if the TMU shares some registers and then try to map the
551	 * memory of common registers.
552	 */
553	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
554		return 0;
555
556	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
557		dev_err(&pdev->dev, "failed to get Resource 1\n");
558		return -ENODEV;
559	}
560
561	data->base_second = devm_ioremap(&pdev->dev, res.start,
562					resource_size(&res));
563	if (!data->base_second) {
564		dev_err(&pdev->dev, "Failed to ioremap memory\n");
565		return -ENOMEM;
566	}
567
568	return 0;
569}
570
571static int exynos_tmu_probe(struct platform_device *pdev)
572{
573	struct exynos_tmu_data *data;
574	struct exynos_tmu_platform_data *pdata;
575	struct thermal_sensor_conf *sensor_conf;
576	int ret, i;
577
578	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
579					GFP_KERNEL);
580	if (!data)
581		return -ENOMEM;
582
583	platform_set_drvdata(pdev, data);
584	mutex_init(&data->lock);
585
586	ret = exynos_map_dt_data(pdev);
587	if (ret)
588		return ret;
589
590	pdata = data->pdata;
591
592	INIT_WORK(&data->irq_work, exynos_tmu_work);
593
594	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
595	if (IS_ERR(data->clk)) {
596		dev_err(&pdev->dev, "Failed to get clock\n");
597		return  PTR_ERR(data->clk);
598	}
599
600	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
601	if (IS_ERR(data->clk_sec)) {
602		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
603			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
604			return PTR_ERR(data->clk_sec);
605		}
606	} else {
607		ret = clk_prepare(data->clk_sec);
608		if (ret) {
609			dev_err(&pdev->dev, "Failed to get clock\n");
610			return ret;
611		}
612	}
613
614	ret = clk_prepare(data->clk);
615	if (ret) {
616		dev_err(&pdev->dev, "Failed to get clock\n");
617		goto err_clk_sec;
618	}
619
620	if (pdata->type == SOC_ARCH_EXYNOS3250 ||
621	    pdata->type == SOC_ARCH_EXYNOS4210 ||
622	    pdata->type == SOC_ARCH_EXYNOS4412 ||
623	    pdata->type == SOC_ARCH_EXYNOS5250 ||
624	    pdata->type == SOC_ARCH_EXYNOS5260 ||
625	    pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
626	    pdata->type == SOC_ARCH_EXYNOS5440)
627		data->soc = pdata->type;
628	else {
629		ret = -EINVAL;
630		dev_err(&pdev->dev, "Platform not supported\n");
631		goto err_clk;
632	}
633
634	ret = exynos_tmu_initialize(pdev);
635	if (ret) {
636		dev_err(&pdev->dev, "Failed to initialize TMU\n");
637		goto err_clk;
638	}
639
640	exynos_tmu_control(pdev, true);
641
642	/* Allocate a structure to register with the exynos core thermal */
643	sensor_conf = devm_kzalloc(&pdev->dev,
644				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
645	if (!sensor_conf) {
646		ret = -ENOMEM;
647		goto err_clk;
648	}
649	sprintf(sensor_conf->name, "therm_zone%d", data->id);
650	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
651	sensor_conf->write_emul_temp =
652		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
653	sensor_conf->driver_data = data;
654	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
655			pdata->trigger_enable[1] + pdata->trigger_enable[2]+
656			pdata->trigger_enable[3];
657
658	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
659		sensor_conf->trip_data.trip_val[i] =
660			pdata->threshold + pdata->trigger_levels[i];
661		sensor_conf->trip_data.trip_type[i] =
662					pdata->trigger_type[i];
663	}
664
665	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
666
667	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
668	for (i = 0; i < pdata->freq_tab_count; i++) {
669		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
670					pdata->freq_tab[i].freq_clip_max;
671		sensor_conf->cooling_data.freq_data[i].temp_level =
672					pdata->freq_tab[i].temp_level;
673	}
674	sensor_conf->dev = &pdev->dev;
675	/* Register the sensor with thermal management interface */
676	ret = exynos_register_thermal(sensor_conf);
677	if (ret) {
678		dev_err(&pdev->dev, "Failed to register thermal interface\n");
679		goto err_clk;
680	}
681	data->reg_conf = sensor_conf;
682
683	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
684		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
685	if (ret) {
686		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
687		goto err_clk;
688	}
689
690	return 0;
691err_clk:
692	clk_unprepare(data->clk);
693err_clk_sec:
694	if (!IS_ERR(data->clk_sec))
695		clk_unprepare(data->clk_sec);
696	return ret;
697}
698
699static int exynos_tmu_remove(struct platform_device *pdev)
700{
701	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
702
703	exynos_unregister_thermal(data->reg_conf);
704
705	exynos_tmu_control(pdev, false);
706
707	clk_unprepare(data->clk);
708	if (!IS_ERR(data->clk_sec))
709		clk_unprepare(data->clk_sec);
710
711	if (!IS_ERR(data->regulator))
712		regulator_disable(data->regulator);
713
714	return 0;
715}
716
717#ifdef CONFIG_PM_SLEEP
718static int exynos_tmu_suspend(struct device *dev)
719{
720	exynos_tmu_control(to_platform_device(dev), false);
721
722	return 0;
723}
724
725static int exynos_tmu_resume(struct device *dev)
726{
727	struct platform_device *pdev = to_platform_device(dev);
728
729	exynos_tmu_initialize(pdev);
730	exynos_tmu_control(pdev, true);
731
732	return 0;
733}
734
735static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
736			 exynos_tmu_suspend, exynos_tmu_resume);
737#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
738#else
739#define EXYNOS_TMU_PM	NULL
740#endif
741
742static struct platform_driver exynos_tmu_driver = {
743	.driver = {
744		.name   = "exynos-tmu",
745		.owner  = THIS_MODULE,
746		.pm     = EXYNOS_TMU_PM,
747		.of_match_table = exynos_tmu_match,
748	},
749	.probe = exynos_tmu_probe,
750	.remove	= exynos_tmu_remove,
751};
752
753module_platform_driver(exynos_tmu_driver);
754
755MODULE_DESCRIPTION("EXYNOS TMU Driver");
756MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
757MODULE_LICENSE("GPL");
758MODULE_ALIAS("platform:exynos-tmu");
759