/*
 * drivers/base/regmap/regmap-irq.c
 * Snapshot including commit: "regmap: irq: add support for chips who
 * have separate unmask registers".
 */
1 /*
2  * regmap based irq_chip
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/device.h>
14 #include <linux/export.h>
15 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/irqdomain.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/regmap.h>
20 #include <linux/slab.h>
21
22 #include "internal.h"
23
/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip() and torn down by
 * regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached mask/wake updates with sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of the regmap_irq_chip template */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static description supplied by the driver */

	int irq_base;		/* first Linux IRQ number; 0 when using a linear domain */
	struct irq_domain *domain;

	int irq;		/* primary (parent) interrupt line */
	int wake_count;		/* net wake enables/disables to propagate to the parent */

	void *status_reg_buf;	/* raw bulk-read buffer, val_bytes * num_regs */
	unsigned int *status_buf;	/* decoded status, one word per register */
	unsigned int *mask_buf;		/* current mask state, written back on unlock */
	unsigned int *mask_buf_def;	/* union of all valid IRQ bits per register */
	unsigned int *wake_buf;		/* wake enables; NULL if chip has no wake_base */

	unsigned int irq_reg_stride;	/* register-index stride between IRQ registers */
};
45
46 static inline const
47 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
48                                      int irq)
49 {
50         return &data->chip->irqs[irq];
51 }
52
53 static void regmap_irq_lock(struct irq_data *data)
54 {
55         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
56
57         mutex_lock(&d->lock);
58 }
59
/*
 * irq_bus_sync_unlock callback: write any cached mask, unmask and wake
 * changes out to the hardware, ack masked-but-pending interrupts if the
 * chip needs that, propagate wake count changes to the parent IRQ, then
 * drop the mutex taken in regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted chips: a set bit means "enabled". */
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			/*
			 * unmask_base mirrors mask_base at a fixed offset;
			 * both registers share the same per-register layout.
			 */
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/*
		 * NOTE(review): reg is computed from wake_base even when
		 * d->wake_buf is NULL; harmless since it is only used
		 * inside the if below, but worth confirming on refactor.
		 */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
158
159 static void regmap_irq_enable(struct irq_data *data)
160 {
161         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
162         struct regmap *map = d->map;
163         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
164
165         d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
166 }
167
168 static void regmap_irq_disable(struct irq_data *data)
169 {
170         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
171         struct regmap *map = d->map;
172         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
173
174         d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
175 }
176
177 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
178 {
179         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
180         struct regmap *map = d->map;
181         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
182
183         if (on) {
184                 if (d->wake_buf)
185                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
186                                 &= ~irq_data->mask;
187                 d->wake_count++;
188         } else {
189                 if (d->wake_buf)
190                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
191                                 |= irq_data->mask;
192                 d->wake_count--;
193         }
194
195         return 0;
196 }
197
/*
 * Template irq_chip copied into every regmap_irq_chip_data instance
 * (the copy gets the chip-specific name).  All operations only touch
 * the caches under the bus lock; hardware I/O is deferred to
 * regmap_irq_sync_unlock().
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};
205
206 static irqreturn_t regmap_irq_thread(int irq, void *d)
207 {
208         struct regmap_irq_chip_data *data = d;
209         const struct regmap_irq_chip *chip = data->chip;
210         struct regmap *map = data->map;
211         int ret, i;
212         bool handled = false;
213         u32 reg;
214
215         if (chip->runtime_pm) {
216                 ret = pm_runtime_get_sync(map->dev);
217                 if (ret < 0) {
218                         dev_err(map->dev, "IRQ thread failed to resume: %d\n",
219                                 ret);
220                         pm_runtime_put(map->dev);
221                         return IRQ_NONE;
222                 }
223         }
224
225         /*
226          * Read in the statuses, using a single bulk read if possible
227          * in order to reduce the I/O overheads.
228          */
229         if (!map->use_single_read && map->reg_stride == 1 &&
230             data->irq_reg_stride == 1) {
231                 u8 *buf8 = data->status_reg_buf;
232                 u16 *buf16 = data->status_reg_buf;
233                 u32 *buf32 = data->status_reg_buf;
234
235                 BUG_ON(!data->status_reg_buf);
236
237                 ret = regmap_bulk_read(map, chip->status_base,
238                                        data->status_reg_buf,
239                                        chip->num_regs);
240                 if (ret != 0) {
241                         dev_err(map->dev, "Failed to read IRQ status: %d\n",
242                                 ret);
243                         return IRQ_NONE;
244                 }
245
246                 for (i = 0; i < data->chip->num_regs; i++) {
247                         switch (map->format.val_bytes) {
248                         case 1:
249                                 data->status_buf[i] = buf8[i];
250                                 break;
251                         case 2:
252                                 data->status_buf[i] = buf16[i];
253                                 break;
254                         case 4:
255                                 data->status_buf[i] = buf32[i];
256                                 break;
257                         default:
258                                 BUG();
259                                 return IRQ_NONE;
260                         }
261                 }
262
263         } else {
264                 for (i = 0; i < data->chip->num_regs; i++) {
265                         ret = regmap_read(map, chip->status_base +
266                                           (i * map->reg_stride
267                                            * data->irq_reg_stride),
268                                           &data->status_buf[i]);
269
270                         if (ret != 0) {
271                                 dev_err(map->dev,
272                                         "Failed to read IRQ status: %d\n",
273                                         ret);
274                                 if (chip->runtime_pm)
275                                         pm_runtime_put(map->dev);
276                                 return IRQ_NONE;
277                         }
278                 }
279         }
280
281         /*
282          * Ignore masked IRQs and ack if we need to; we ack early so
283          * there is no race between handling and acknowleding the
284          * interrupt.  We assume that typically few of the interrupts
285          * will fire simultaneously so don't worry about overhead from
286          * doing a write per register.
287          */
288         for (i = 0; i < data->chip->num_regs; i++) {
289                 data->status_buf[i] &= ~data->mask_buf[i];
290
291                 if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
292                         reg = chip->ack_base +
293                                 (i * map->reg_stride * data->irq_reg_stride);
294                         ret = regmap_write(map, reg, data->status_buf[i]);
295                         if (ret != 0)
296                                 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
297                                         reg, ret);
298                 }
299         }
300
301         for (i = 0; i < chip->num_irqs; i++) {
302                 if (data->status_buf[chip->irqs[i].reg_offset /
303                                      map->reg_stride] & chip->irqs[i].mask) {
304                         handle_nested_irq(irq_find_mapping(data->domain, i));
305                         handled = true;
306                 }
307         }
308
309         if (chip->runtime_pm)
310                 pm_runtime_put(map->dev);
311
312         if (handled)
313                 return IRQ_HANDLED;
314         else
315                 return IRQ_NONE;
316 }
317
318 static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
319                           irq_hw_number_t hw)
320 {
321         struct regmap_irq_chip_data *data = h->host_data;
322
323         irq_set_chip_data(virq, data);
324         irq_set_chip(virq, &data->irq_chip);
325         irq_set_nested_thread(virq, 1);
326         irq_set_noprobe(virq);
327
328         return 0;
329 }
330
/* Domain ops; DT specifiers are translated with the generic two-cell xlate. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};
335
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Every IRQ must map onto one of the chip's status/mask registers. */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* One cached word per status/mask register. */
	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	/*
	 * Raw buffer for the bulk status read in regmap_irq_thread(); only
	 * usable when registers are contiguous with unit stride.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect the union of valid IRQ bits for each register. */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			/* Chips with a separate unmask register mask there. */
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	/*
	 * NOTE(review): error paths do not call irq_free_descs() for a
	 * successful irq_alloc_descs() above, nor irq_domain_remove() for
	 * a created domain (see comment) — known leaks to confirm/fix.
	 */
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
545
546 /**
547  * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
548  *
549  * @irq: Primary IRQ for the device
550  * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
551  */
552 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
553 {
554         if (!d)
555                 return;
556
557         free_irq(irq, d);
558         irq_domain_remove(d->domain);
559         kfree(d->wake_buf);
560         kfree(d->mask_buf_def);
561         kfree(d->mask_buf);
562         kfree(d->status_reg_buf);
563         kfree(d->status_buf);
564         kfree(d);
565 }
566 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
567
568 /**
569  * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
570  *
571  * Useful for drivers to request their own IRQs.
572  *
573  * @data: regmap_irq controller to operate on.
574  */
575 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
576 {
577         WARN_ON(!data->irq_base);
578         return data->irq_base;
579 }
580 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
581
582 /**
583  * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
584  *
585  * Useful for drivers to request their own IRQs.
586  *
587  * @data: regmap_irq controller to operate on.
588  * @irq: index of the interrupt requested in the chip IRQs
589  */
590 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
591 {
592         /* Handle holes in the IRQ list */
593         if (!data->chip->irqs[irq].mask)
594                 return -EINVAL;
595
596         return irq_create_mapping(data->domain, irq);
597 }
598 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
599
600 /**
601  * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
602  *
603  * Useful for drivers to request their own IRQs and for integration
604  * with subsystems.  For ease of integration NULL is accepted as a
605  * domain, allowing devices to just call this even if no domain is
606  * allocated.
607  *
608  * @data: regmap_irq controller to operate on.
609  */
610 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
611 {
612         if (data)
613                 return data->domain;
614         else
615                 return NULL;
616 }
617 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);