
exynos_tmu.c revision 86f5362e7a1903455053511ed11ecceb8dd6d6dd
/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"

/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *	driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_common: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_common;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	if (pdata->cal_mode == HW_MODE)
		return temp;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp should range between 25 and 125 */
		if (temp < 25 || temp > 125) {
			temp_code = -EINVAL;
			goto out;
		}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}
out:
	return temp_code;
}
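
/*
 * Worked example for temp_to_code() with purely illustrative values:
 * assuming temp_error1 = 50, temp_error2 = 110, first_point_trim = 25 and
 * second_point_trim = 85, two-point trimming maps temp = 55 degC to
 * (55 - 25) * (110 - 50) / (85 - 25) + 50 = 80.
 */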

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	if (pdata->cal_mode == HW_MODE)
		return temp_code;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp_code should range between 75 and 175 */
		if (temp_code < 75 || temp_code > 175) {
			temp = -ENODATA;
			goto out;
		}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}
out:
	return temp;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i, trigger_levs = 0;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + reg->tmu_status);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
		__raw_writel(1, data->base + reg->triminfo_ctrl);

	if (pdata->cal_mode == HW_MODE)
		goto skip_calib_data;

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * For the exynos5440 SoC, the triminfo value is swapped
		 * between TMU0 and TMU2, so the logic below is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
			break;
		case 1:
			trim_info = readl(data->base + reg->triminfo_data);
			break;
		case 2:
			trim_info = readl(data->base -
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
		}
	} else {
		trim_info = readl(data->base + reg->triminfo_data);
	}
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
				EXYNOS_TMU_TEMP_MASK);

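	/*
	 * Fall back to the default efuse value when the fused first-point
	 * trim is missing or outside [min_efuse_value, max_efuse_value];
	 * likewise use the default for the second-point trim when its fuse
	 * field is empty.
	 */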
	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> reg->triminfo_85_shift) &
			EXYNOS_TMU_TEMP_MASK;

skip_calib_data:
	if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
		dev_err(&pdev->dev, "Invalid max trigger level\n");
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < pdata->max_trigger_level; i++) {
		if (!pdata->trigger_levels[i])
			continue;

		if ((pdata->trigger_type[i] == HW_TRIP) &&
		(!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
			dev_err(&pdev->dev, "Invalid hw trigger level\n");
			ret = -EINVAL;
			goto out;
		}

		/* Count trigger levels except the HW trip */
		if (pdata->trigger_type[i] != HW_TRIP)
			trigger_levs++;
	}

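	/*
	 * Exynos4210 programs a single base threshold plus one trigger-level
	 * offset per register; later SoCs instead pack up to four 8-bit
	 * rising/falling temperature codes into each threshold register.
	 */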
	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		writeb(threshold_code,
			data->base + reg->threshold_temp);
		for (i = 0; i < trigger_levs; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0;
		i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			rising_threshold |= threshold_code << 8 * i;
			if (pdata->threshold_falling) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				if (threshold_code > 0)
					falling_threshold |=
						threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		writel((reg->inten_rise_mask << reg->inten_rise_shift) |
			(reg->inten_fall_mask << reg->inten_fall_shift),
				data->base + reg->tmu_intclear);

		/* if last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + reg->threshold_th0);
			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << reg->threshold_th3_l0_shift;
				writel(rising_threshold,
					data->base + reg->threshold_th2);
			}
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << reg->therm_trip_en_shift);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/* Clear the PMIN in the common TMU register */
	if (reg->tmu_pmin && !data->id)
		writel(0, data->base_common + reg->tmu_pmin);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en, cal_val;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	if (pdata->test_mux)
		con |= (pdata->test_mux << reg->test_mux_addr_shift);

	if (pdata->reference_voltage) {
		con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
		con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
	}

	if (pdata->gain) {
		con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
		con |= (pdata->gain << reg->buf_slope_sel_shift);
	}

	if (pdata->noise_cancel_mode) {
		con &= ~(reg->therm_trip_mode_mask <<
					reg->therm_trip_mode_shift);
		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
	}

	if (pdata->cal_mode == HW_MODE) {
		con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
		cal_val = 0;
		switch (pdata->cal_type) {
		case TYPE_TWO_POINT_TRIMMING:
			cal_val = 3;
			break;
		case TYPE_ONE_POINT_TRIMMING_85:
			cal_val = 2;
			break;
		case TYPE_ONE_POINT_TRIMMING_25:
			cal_val = 1;
			break;
		case TYPE_NONE:
			break;
		default:
			dev_err(&pdev->dev, "Invalid calibration type, using none\n");
		}
		con |= cal_val << reg->calib_mode_shift;
	}

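	/*
	 * Gate the TMU core and its interrupts together: enabling the core
	 * also unmasks one rising-threshold interrupt per enabled trigger
	 * level (mirrored into the falling bits when FALLING_TRIP is
	 * supported), while disabling it masks everything.
	 */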
	if (on) {
		con |= (1 << reg->core_en_shift);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << reg->core_en_shift);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	u8 temp_code;
	int temp;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	temp_code = readb(data->base + reg->tmu_cur_temp);
	temp = code_to_temp(data, temp_code);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return temp;
}

#ifdef CONFIG_THERMAL_EMULATION
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

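	/*
	 * A non-zero temperature arms emulation: the requested value is
	 * converted to a temperature code and latched together with
	 * EXYNOS_EMUL_ENABLE (plus a sampling delay when EMUL_TIME is
	 * supported); passing 0 simply clears the enable bit.
	 */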
	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val_irq, val_type;

	/* Find which sensor generated this interrupt */
	if (reg->tmu_irqstatus) {
		val_type = readl(data->base_common + reg->tmu_irqstatus);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	val_irq = readl(data->base + reg->tmu_intstat);
	/* clear the interrupts */
	writel(val_irq, data->base + reg->tmu_intclear);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

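/*
 * The hard IRQ handler only masks the line and defers to exynos_tmu_work(),
 * which acknowledges the interrupt under the driver mutex and re-enables the
 * IRQ once the trigger has been reported to the thermal core.
 */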
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

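/*
 * The match data for each compatible string points at a per-SoC table of
 * exynos_tmu_init_data; each TMU instance picks its own platform data out of
 * that table by its id.
 */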
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

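	/*
	 * The "tmuctrl" alias distinguishes the TMU instances on multi-TMU
	 * SoCs; fall back to instance 0 when the device tree defines none.
	 */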
	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_common = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_common) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret)
		return ret;

	if (pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5440)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		dev_err(&pdev->dev, "Failed to allocate registration struct\n");
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
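	/*
	 * trigger_enable[] entries are 0/1 flags, so their sum is the number
	 * of trip points registered with the exynos thermal core below.
	 */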
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_tmu_control(pdev, false);

	exynos_unregister_thermal(data->reg_conf);

	clk_unprepare(data->clk);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

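/*
 * Resume re-runs the full register setup: thresholds and calibration are
 * reprogrammed before the TMU core and its interrupts are switched back on.
 */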
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");