
regmap-irq.c revision d3233433356aa1965b60b08ee61465b20e50474b
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;

	unsigned int irq_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask, write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; a
		 * masked interrupt that is left unacked will be ignored
		 * by the interrupt handler and may trigger an
		 * interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count, propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			return IRQ_NONE;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_rw && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			/* Don't leak the runtime PM reference on error */
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			return IRQ_NONE;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return IRQ_NONE;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				return IRQ_NONE;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);

	/*
	 * ARM needs us to explicitly flag the IRQ as valid
	 * and will set it noprobe when we do so.
	 */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}

static struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Base for a range of allocated IRQ descriptors, or zero
 *             to map interrupts dynamically through the IRQ domain.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	*data = d;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (!map->use_single_rw && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
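
/*
 * A minimal usage sketch (not part of this file): a hypothetical I2C
 * device with interrupt sources in a single 8-bit status/mask/ack
 * register bank.  All foo_*/FOO_* names below are invented for
 * illustration; only the regmap_irq calls themselves are real.
 */
#if 0	/* example only */
static const struct regmap_irq foo_irqs[] = {
	[FOO_IRQ_ALARM] = { .reg_offset = 0, .mask = BIT(0) },
	[FOO_IRQ_GPIO]  = { .reg_offset = 0, .mask = BIT(1) },
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.status_base = FOO_REG_INT_STATUS,
	.mask_base = FOO_REG_INT_MASK,
	.ack_base = FOO_REG_INT_ACK,
	.num_regs = 1,
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),
};

/* In probe: irq_base = 0 selects a linear (dynamically mapped) domain */
ret = regmap_add_irq_chip(map, i2c->irq,
			  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			  0, &foo_irq_chip, &irq_data);

/* In remove: pass the same primary IRQ and the returned data */
regmap_del_irq_chip(i2c->irq, irq_data);
#endif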

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	/* We should unmap the domain but... */
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
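
/*
 * Sketch: only meaningful when a non-zero irq_base was passed to
 * regmap_add_irq_chip() (legacy domain), in which case the virtual IRQ
 * for chip interrupt N is simply base + N.  FOO_IRQ_ALARM is invented.
 */
#if 0	/* example only */
int base = regmap_irq_chip_get_base(irq_data);
int alarm_irq = base + FOO_IRQ_ALARM;
#endif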

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
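
/*
 * Sketch: translate a chip-relative interrupt index into a virtual IRQ
 * and request it.  This works with either domain type, so it is the
 * natural choice when irq_base was zero.  Names other than the
 * regmap/IRQ core calls are invented.
 */
#if 0	/* example only */
int virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);
if (virq < 0)
	return virq;
ret = request_threaded_irq(virq, NULL, foo_alarm_handler,
			   IRQF_ONESHOT, "foo-alarm", foo);
#endif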

/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 *
 * @data: regmap_irq controller to operate on.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);

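/*
 * Sketch: handing the domain to other code, e.g. looking up an existing
 * mapping.  irq_find_mapping() is the real IRQ core helper; the
 * surrounding names are invented, and the NULL check mirrors the
 * documented tolerance above.
 */
#if 0	/* example only */
struct irq_domain *domain = regmap_irq_get_domain(irq_data);
unsigned int virq = domain ? irq_find_mapping(domain, FOO_IRQ_GPIO) : 0;
#endif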