drivers/hwtracing/coresight/coresight-etm3x.c
1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/io.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/cpu.h>
28 #include <linux/of.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/clk.h>
34 #include <asm/sections.h>
35
36 #include "coresight-etm.h"
37
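/*
 * Module parameter: when non-zero, tracing is turned on without waiting for
 * user space (the CPU_ONLINE hotplug case below re-enables such ETMs when
 * their CPU comes online).  If the driver is built in, this is presumably
 * passed as coresight-etm3x.boot_enable=1 on the kernel command line.
 */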
38 static int boot_enable;
39 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
40
41 /* The number of ETMs/PTMs currently registered */
42 static int etm_count;
43 static struct etm_drvdata *etmdrvdata[NR_CPUS];
44
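/*
 * Register accessors: when drvdata->use_cp14 is set, the ETM/PTM registers
 * are reached through the CP14 coprocessor interface, otherwise through the
 * memory-mapped window at drvdata->base.
 */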
45 static inline void etm_writel(struct etm_drvdata *drvdata,
46                               u32 val, u32 off)
47 {
48         if (drvdata->use_cp14) {
49                 if (etm_writel_cp14(off, val)) {
50                         dev_err(drvdata->dev,
51                                 "invalid CP14 access to ETM reg: %#x", off);
52                 }
53         } else {
54                 writel_relaxed(val, drvdata->base + off);
55         }
56 }
57
58 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
59 {
60         u32 val;
61
62         if (drvdata->use_cp14) {
63                 if (etm_readl_cp14(off, &val)) {
64                         dev_err(drvdata->dev,
65                                 "invalid CP14 access to ETM reg: %#x", off);
66                 }
67         } else {
68                 val = readl_relaxed(drvdata->base + off);
69         }
70
71         return val;
72 }
73
74 /*
75  * Memory-mapped writes to clear the OS lock are not supported on some
76  * processors; on those, the OS lock must be cleared before any memory-mapped
77  * access, otherwise memory-mapped reads/writes will be invalid.
78  */
79 static void etm_os_unlock(void *info)
80 {
81         struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
82         /* Writing any value to ETMOSLAR unlocks the trace registers */
83         etm_writel(drvdata, 0x0, ETMOSLAR);
84         isb();
85 }
86
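/*
 * Power control helpers: etm_set/clr_pwrdwn() toggle ETMCR.PowerDown while
 * etm_set/clr_pwrup() toggle the power-up bit in ETMPDCR.  The mb()/isb()
 * pairs order pending CP14 or memory-mapped accesses against the power state
 * change, as noted in each helper.
 */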
87 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
88 {
89         u32 etmcr;
90
91         /* Ensure pending cp14 accesses complete before setting pwrdwn */
92         mb();
93         isb();
94         etmcr = etm_readl(drvdata, ETMCR);
95         etmcr |= ETMCR_PWD_DWN;
96         etm_writel(drvdata, etmcr, ETMCR);
97 }
98
99 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
100 {
101         u32 etmcr;
102
103         etmcr = etm_readl(drvdata, ETMCR);
104         etmcr &= ~ETMCR_PWD_DWN;
105         etm_writel(drvdata, etmcr, ETMCR);
106         /* Ensure pwrup completes before subsequent cp14 accesses */
107         mb();
108         isb();
109 }
110
111 static void etm_set_pwrup(struct etm_drvdata *drvdata)
112 {
113         u32 etmpdcr;
114
115         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
116         etmpdcr |= ETMPDCR_PWD_UP;
117         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
118         /* Ensure pwrup completes before subsequent cp14 accesses */
119         mb();
120         isb();
121 }
122
123 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
124 {
125         u32 etmpdcr;
126
127         /* Ensure pending cp14 accesses complete before clearing pwrup */
128         mb();
129         isb();
130         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
131         etmpdcr &= ~ETMPDCR_PWD_UP;
132         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
133 }
134
135 /**
136  * coresight_timeout_etm - loop until a bit has changed to a specific state.
137  * @drvdata: etm's private data structure.
138  * @offset: offset of the register to poll, relative to @drvdata->base.
139  * @position: the position of the bit of interest.
140  * @value: the value the bit should have.
141  *
142  * Basically the same as @coresight_timeout except for the register access
143  * method where we have to account for CP14 configurations.
144  *
145  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
146  * TIMEOUT_US has elapsed, whichever happens first.
147  */
148
149 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
150                                   int position, int value)
151 {
152         int i;
153         u32 val;
154
155         for (i = TIMEOUT_US; i > 0; i--) {
156                 val = etm_readl(drvdata, offset);
157                 /* Waiting on the bit to go from 0 to 1 */
158                 if (value) {
159                         if (val & BIT(position))
160                                 return 0;
161                 /* Waiting on the bit to go from 1 to 0 */
162                 } else {
163                         if (!(val & BIT(position)))
164                                 return 0;
165                 }
166
167                 /*
168                  * Delay is arbitrary - the specification doesn't say how long
169                  * we are expected to wait.  Extra check required to make sure
170                  * we don't wait needlessly on the last iteration.
171                  */
172                 if (i - 1)
173                         udelay(1);
174         }
175
176         return -EAGAIN;
177 }
178
179
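/*
 * etm_set_prog()/etm_clr_prog() move the ETM in and out of programming state
 * by toggling the programming bit in ETMCR and then polling the matching
 * ETMSR bit until the hardware acknowledges the transition.
 */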
180 static void etm_set_prog(struct etm_drvdata *drvdata)
181 {
182         u32 etmcr;
183
184         etmcr = etm_readl(drvdata, ETMCR);
185         etmcr |= ETMCR_ETM_PRG;
186         etm_writel(drvdata, etmcr, ETMCR);
187         /*
188          * Recommended by spec for cp14 accesses to ensure etmcr write is
189          * complete before polling etmsr
190          */
191         isb();
192         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
193                 dev_err(drvdata->dev,
194                         "timeout observed when probing at offset %#x\n", ETMSR);
195         }
196 }
197
198 static void etm_clr_prog(struct etm_drvdata *drvdata)
199 {
200         u32 etmcr;
201
202         etmcr = etm_readl(drvdata, ETMCR);
203         etmcr &= ~ETMCR_ETM_PRG;
204         etm_writel(drvdata, etmcr, ETMCR);
205         /*
206          * Recommended by spec for cp14 accesses to ensure etmcr write is
207          * complete before polling etmsr
208          */
209         isb();
210         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
211                 dev_err(drvdata->dev,
212                         "timeout observed when probing at offset %#x\n", ETMSR);
213         }
214 }
215
216 static void etm_set_default(struct etm_drvdata *drvdata)
217 {
218         int i;
219
220         drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
221         drvdata->enable_event = ETM_HARD_WIRE_RES_A;
222
223         drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
224         drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
225         drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
226         drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
227         drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
228         drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
229         drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
230
231         for (i = 0; i < drvdata->nr_cntr; i++) {
232                 drvdata->cntr_rld_val[i] = 0x0;
233                 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
234                 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
235                 drvdata->cntr_val[i] = 0x0;
236         }
237
238         drvdata->seq_curr_state = 0x0;
239         drvdata->ctxid_idx = 0x0;
240         for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
241                 drvdata->ctxid_pid[i] = 0x0;
242                 drvdata->ctxid_vpid[i] = 0x0;
243         }
244
245         drvdata->ctxid_mask = 0x0;
246 }
247
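/*
 * Called on the CPU owning this ETM (via smp_call_function_single() or the
 * CPU_STARTING hotplug path): unlock the CoreSight registers, power the trace
 * unit up, clear the OS lock, enter programming state, write the cached
 * configuration to the hardware, then leave programming state so tracing
 * starts.
 */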
248 static void etm_enable_hw(void *info)
249 {
250         int i;
251         u32 etmcr;
252         struct etm_drvdata *drvdata = info;
253
254         CS_UNLOCK(drvdata->base);
255
256         /* Turn engine on */
257         etm_clr_pwrdwn(drvdata);
258         /* Apply power to trace registers */
259         etm_set_pwrup(drvdata);
260         /* Make sure all registers are accessible */
261         etm_os_unlock(drvdata);
262
263         etm_set_prog(drvdata);
264
265         etmcr = etm_readl(drvdata, ETMCR);
266         etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
267         etmcr |= drvdata->port_size;
268         etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
269         etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
270         etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
271         etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
272         etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
273         etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
274         for (i = 0; i < drvdata->nr_addr_cmp; i++) {
275                 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
276                 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
277         }
278         for (i = 0; i < drvdata->nr_cntr; i++) {
279                 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
280                 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
281                 etm_writel(drvdata, drvdata->cntr_rld_event[i],
282                            ETMCNTRLDEVRn(i));
283                 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
284         }
285         etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
286         etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
287         etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
288         etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
289         etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
290         etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
291         etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
292         for (i = 0; i < drvdata->nr_ext_out; i++)
293                 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
294         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
295                 etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
296         etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
297         etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
298         /* No external input selected */
299         etm_writel(drvdata, 0x0, ETMEXTINSELR);
300         etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
301         /* No auxiliary control selected */
302         etm_writel(drvdata, 0x0, ETMAUXCR);
303         etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
304         /* No VMID comparator value selected */
305         etm_writel(drvdata, 0x0, ETMVMIDCVR);
306
307         /* Ensures trace output is enabled from this ETM */
308         etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
309
310         etm_clr_prog(drvdata);
311         CS_LOCK(drvdata->base);
312
313         dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
314 }
315
316 static int etm_trace_id_simple(struct etm_drvdata *drvdata)
317 {
318         if (!drvdata->enable)
319                 return drvdata->traceid;
320
321         return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
322 }
323
324 static int etm_trace_id(struct coresight_device *csdev)
325 {
326         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
327         unsigned long flags;
328         int trace_id = -1;
329
330         if (!drvdata->enable)
331                 return drvdata->traceid;
332         pm_runtime_get_sync(csdev->dev.parent);
333
334         spin_lock_irqsave(&drvdata->spinlock, flags);
335
336         CS_UNLOCK(drvdata->base);
337         trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
338         CS_LOCK(drvdata->base);
339
340         spin_unlock_irqrestore(&drvdata->spinlock, flags);
341         pm_runtime_put(csdev->dev.parent);
342
343         return trace_id;
344 }
345
346 static int etm_enable(struct coresight_device *csdev)
347 {
348         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
349         int ret;
350
351         pm_runtime_get_sync(csdev->dev.parent);
352         spin_lock(&drvdata->spinlock);
353
354         /*
355          * Configure the ETM only if the CPU is online.  If it isn't online,
356          * hw configuration will take place when 'CPU_STARTING' is received
357          * in @etm_cpu_callback.
358          */
359         if (cpu_online(drvdata->cpu)) {
360                 ret = smp_call_function_single(drvdata->cpu,
361                                                etm_enable_hw, drvdata, 1);
362                 if (ret)
363                         goto err;
364         }
365
366         drvdata->enable = true;
367         drvdata->sticky_enable = true;
368
369         spin_unlock(&drvdata->spinlock);
370
371         dev_info(drvdata->dev, "ETM tracing enabled\n");
372         return 0;
373 err:
374         spin_unlock(&drvdata->spinlock);
375         pm_runtime_put(csdev->dev.parent);
376         return ret;
377 }
378
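/*
 * Counterpart of etm_enable_hw(), also run on the owning CPU: stop trace
 * generation with an always-false enable event, read back the sequencer and
 * counters for post-trace analysis, then power the trace registers down.
 */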
379 static void etm_disable_hw(void *info)
380 {
381         int i;
382         struct etm_drvdata *drvdata = info;
383
384         CS_UNLOCK(drvdata->base);
385         etm_set_prog(drvdata);
386
387         /* Program trace enable to low by using always false event */
388         etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
389
390         /* Read back sequencer and counters for post trace analysis */
391         drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
392
393         for (i = 0; i < drvdata->nr_cntr; i++)
394                 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
395
396         etm_set_pwrdwn(drvdata);
397         CS_LOCK(drvdata->base);
398
399         dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
400 }
401
402 static void etm_disable(struct coresight_device *csdev)
403 {
404         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
405
406         /*
407          * Taking the hotplug lock here protects against the clocks being
408          * disabled while tracing is left on (a crash scenario) if a user-initiated
409          * disable occurs after the cpu online mask indicates the cpu is offline
410          * but before the DYING hotplug callback is serviced by the ETM driver.
411          */
412         get_online_cpus();
413         spin_lock(&drvdata->spinlock);
414
415         /*
416          * Executing etm_disable_hw on the cpu whose ETM is being disabled
417          * ensures that register writes occur when cpu is powered.
418          */
419         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
420         drvdata->enable = false;
421
422         spin_unlock(&drvdata->spinlock);
423         put_online_cpus();
424         pm_runtime_put(csdev->dev.parent);
425
426         dev_info(drvdata->dev, "ETM tracing disabled\n");
427 }
428
429 static const struct coresight_ops_source etm_source_ops = {
430         .trace_id       = etm_trace_id,
431         .enable         = etm_enable,
432         .disable        = etm_disable,
433 };
434
435 static const struct coresight_ops etm_cs_ops = {
436         .source_ops     = &etm_source_ops,
437 };
438
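/*
 * sysfs interface.  The attributes below expose the ETM configuration cached
 * in etm_drvdata; most of the values are only written to the hardware by
 * etm_enable_hw().  They typically appear under
 * /sys/bus/coresight/devices/<device>/ (path given for illustration), e.g.
 * writing 1 to "reset" restores the defaults from etm_set_default().
 */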
439 static ssize_t nr_addr_cmp_show(struct device *dev,
440                                 struct device_attribute *attr, char *buf)
441 {
442         unsigned long val;
443         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
444
445         val = drvdata->nr_addr_cmp;
446         return sprintf(buf, "%#lx\n", val);
447 }
448 static DEVICE_ATTR_RO(nr_addr_cmp);
449
450 static ssize_t nr_cntr_show(struct device *dev,
451                             struct device_attribute *attr, char *buf)
452 {
        unsigned long val;
453         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
454
455         val = drvdata->nr_cntr;
456         return sprintf(buf, "%#lx\n", val);
457 }
458 static DEVICE_ATTR_RO(nr_cntr);
459
460 static ssize_t nr_ctxid_cmp_show(struct device *dev,
461                                  struct device_attribute *attr, char *buf)
462 {
463         unsigned long val;
464         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
465
466         val = drvdata->nr_ctxid_cmp;
467         return sprintf(buf, "%#lx\n", val);
468 }
469 static DEVICE_ATTR_RO(nr_ctxid_cmp);
470
471 static ssize_t etmsr_show(struct device *dev,
472                           struct device_attribute *attr, char *buf)
473 {
474         unsigned long flags, val;
475         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
476
477         pm_runtime_get_sync(drvdata->dev);
478         spin_lock_irqsave(&drvdata->spinlock, flags);
479         CS_UNLOCK(drvdata->base);
480
481         val = etm_readl(drvdata, ETMSR);
482
483         CS_LOCK(drvdata->base);
484         spin_unlock_irqrestore(&drvdata->spinlock, flags);
485         pm_runtime_put(drvdata->dev);
486
487         return sprintf(buf, "%#lx\n", val);
488 }
489 static DEVICE_ATTR_RO(etmsr);
490
491 static ssize_t reset_store(struct device *dev,
492                            struct device_attribute *attr,
493                            const char *buf, size_t size)
494 {
495         int i, ret;
496         unsigned long val;
497         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
498
499         ret = kstrtoul(buf, 16, &val);
500         if (ret)
501                 return ret;
502
503         if (val) {
504                 spin_lock(&drvdata->spinlock);
505                 drvdata->mode = ETM_MODE_EXCLUDE;
506                 drvdata->ctrl = 0x0;
507                 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
508                 drvdata->startstop_ctrl = 0x0;
509                 drvdata->addr_idx = 0x0;
510                 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
511                         drvdata->addr_val[i] = 0x0;
512                         drvdata->addr_acctype[i] = 0x0;
513                         drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
514                 }
515                 drvdata->cntr_idx = 0x0;
516
517                 etm_set_default(drvdata);
518                 spin_unlock(&drvdata->spinlock);
519         }
520
521         return size;
522 }
523 static DEVICE_ATTR_WO(reset);
524
525 static ssize_t mode_show(struct device *dev,
526                          struct device_attribute *attr, char *buf)
527 {
528         unsigned long val;
529         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
530
531         val = drvdata->mode;
532         return sprintf(buf, "%#lx\n", val);
533 }
534
535 static ssize_t mode_store(struct device *dev,
536                           struct device_attribute *attr,
537                           const char *buf, size_t size)
538 {
539         int ret;
540         unsigned long val;
541         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
542
543         ret = kstrtoul(buf, 16, &val);
544         if (ret)
545                 return ret;
546
547         spin_lock(&drvdata->spinlock);
548         drvdata->mode = val & ETM_MODE_ALL;
549
550         if (drvdata->mode & ETM_MODE_EXCLUDE)
551                 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
552         else
553                 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
554
555         if (drvdata->mode & ETM_MODE_CYCACC)
556                 drvdata->ctrl |= ETMCR_CYC_ACC;
557         else
558                 drvdata->ctrl &= ~ETMCR_CYC_ACC;
559
560         if (drvdata->mode & ETM_MODE_STALL) {
561                 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
562                         dev_warn(drvdata->dev, "stall mode not supported\n");
563                         ret = -EINVAL;
564                         goto err_unlock;
565                 }
566                 drvdata->ctrl |= ETMCR_STALL_MODE;
567         } else
568                 drvdata->ctrl &= ~ETMCR_STALL_MODE;
569
570         if (drvdata->mode & ETM_MODE_TIMESTAMP) {
571                 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
572                         dev_warn(drvdata->dev, "timestamp not supported\n");
573                         ret = -EINVAL;
574                         goto err_unlock;
575                 }
576                 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
577         } else
578                 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
579
580         if (drvdata->mode & ETM_MODE_CTXID)
581                 drvdata->ctrl |= ETMCR_CTXID_SIZE;
582         else
583                 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
584         spin_unlock(&drvdata->spinlock);
585
586         return size;
587
588 err_unlock:
589         spin_unlock(&drvdata->spinlock);
590         return ret;
591 }
592 static DEVICE_ATTR_RW(mode);
593
594 static ssize_t trigger_event_show(struct device *dev,
595                                   struct device_attribute *attr, char *buf)
596 {
597         unsigned long val;
598         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
599
600         val = drvdata->trigger_event;
601         return sprintf(buf, "%#lx\n", val);
602 }
603
604 static ssize_t trigger_event_store(struct device *dev,
605                                    struct device_attribute *attr,
606                                    const char *buf, size_t size)
607 {
608         int ret;
609         unsigned long val;
610         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
611
612         ret = kstrtoul(buf, 16, &val);
613         if (ret)
614                 return ret;
615
616         drvdata->trigger_event = val & ETM_EVENT_MASK;
617
618         return size;
619 }
620 static DEVICE_ATTR_RW(trigger_event);
621
622 static ssize_t enable_event_show(struct device *dev,
623                                  struct device_attribute *attr, char *buf)
624 {
625         unsigned long val;
626         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
627
628         val = drvdata->enable_event;
629         return sprintf(buf, "%#lx\n", val);
630 }
631
632 static ssize_t enable_event_store(struct device *dev,
633                                   struct device_attribute *attr,
634                                   const char *buf, size_t size)
635 {
636         int ret;
637         unsigned long val;
638         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
639
640         ret = kstrtoul(buf, 16, &val);
641         if (ret)
642                 return ret;
643
644         drvdata->enable_event = val & ETM_EVENT_MASK;
645
646         return size;
647 }
648 static DEVICE_ATTR_RW(enable_event);
649
650 static ssize_t fifofull_level_show(struct device *dev,
651                                    struct device_attribute *attr, char *buf)
652 {
653         unsigned long val;
654         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
655
656         val = drvdata->fifofull_level;
657         return sprintf(buf, "%#lx\n", val);
658 }
659
660 static ssize_t fifofull_level_store(struct device *dev,
661                                     struct device_attribute *attr,
662                                     const char *buf, size_t size)
663 {
664         int ret;
665         unsigned long val;
666         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
667
668         ret = kstrtoul(buf, 16, &val);
669         if (ret)
670                 return ret;
671
672         drvdata->fifofull_level = val;
673
674         return size;
675 }
676 static DEVICE_ATTR_RW(fifofull_level);
677
678 static ssize_t addr_idx_show(struct device *dev,
679                              struct device_attribute *attr, char *buf)
680 {
681         unsigned long val;
682         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
683
684         val = drvdata->addr_idx;
685         return sprintf(buf, "%#lx\n", val);
686 }
687
688 static ssize_t addr_idx_store(struct device *dev,
689                               struct device_attribute *attr,
690                               const char *buf, size_t size)
691 {
692         int ret;
693         unsigned long val;
694         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
695
696         ret = kstrtoul(buf, 16, &val);
697         if (ret)
698                 return ret;
699
700         if (val >= drvdata->nr_addr_cmp)
701                 return -EINVAL;
702
703         /*
704          * Use spinlock to ensure index doesn't change while it gets
705          * dereferenced multiple times within a spinlock block elsewhere.
706          */
707         spin_lock(&drvdata->spinlock);
708         drvdata->addr_idx = val;
709         spin_unlock(&drvdata->spinlock);
710
711         return size;
712 }
713 static DEVICE_ATTR_RW(addr_idx);
714
715 static ssize_t addr_single_show(struct device *dev,
716                                 struct device_attribute *attr, char *buf)
717 {
718         u8 idx;
719         unsigned long val;
720         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
721
722         spin_lock(&drvdata->spinlock);
723         idx = drvdata->addr_idx;
724         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
725               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
726                 spin_unlock(&drvdata->spinlock);
727                 return -EINVAL;
728         }
729
730         val = drvdata->addr_val[idx];
731         spin_unlock(&drvdata->spinlock);
732
733         return sprintf(buf, "%#lx\n", val);
734 }
735
736 static ssize_t addr_single_store(struct device *dev,
737                                  struct device_attribute *attr,
738                                  const char *buf, size_t size)
739 {
740         u8 idx;
741         int ret;
742         unsigned long val;
743         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
744
745         ret = kstrtoul(buf, 16, &val);
746         if (ret)
747                 return ret;
748
749         spin_lock(&drvdata->spinlock);
750         idx = drvdata->addr_idx;
751         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
752               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
753                 spin_unlock(&drvdata->spinlock);
754                 return -EINVAL;
755         }
756
757         drvdata->addr_val[idx] = val;
758         drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
759         spin_unlock(&drvdata->spinlock);
760
761         return size;
762 }
763 static DEVICE_ATTR_RW(addr_single);
764
765 static ssize_t addr_range_show(struct device *dev,
766                                struct device_attribute *attr, char *buf)
767 {
768         u8 idx;
769         unsigned long val1, val2;
770         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
771
772         spin_lock(&drvdata->spinlock);
773         idx = drvdata->addr_idx;
774         if (idx % 2 != 0) {
775                 spin_unlock(&drvdata->spinlock);
776                 return -EPERM;
777         }
778         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
779                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
780               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
781                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
782                 spin_unlock(&drvdata->spinlock);
783                 return -EPERM;
784         }
785
786         val1 = drvdata->addr_val[idx];
787         val2 = drvdata->addr_val[idx + 1];
788         spin_unlock(&drvdata->spinlock);
789
790         return sprintf(buf, "%#lx %#lx\n", val1, val2);
791 }
792
793 static ssize_t addr_range_store(struct device *dev,
794                               struct device_attribute *attr,
795                               const char *buf, size_t size)
796 {
797         u8 idx;
798         unsigned long val1, val2;
799         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
800
801         if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
802                 return -EINVAL;
803         /* Lower address comparator cannot have a higher address value */
804         if (val1 > val2)
805                 return -EINVAL;
806
807         spin_lock(&drvdata->spinlock);
808         idx = drvdata->addr_idx;
809         if (idx % 2 != 0) {
810                 spin_unlock(&drvdata->spinlock);
811                 return -EPERM;
812         }
813         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
814                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
815               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
816                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
817                 spin_unlock(&drvdata->spinlock);
818                 return -EPERM;
819         }
820
821         drvdata->addr_val[idx] = val1;
822         drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
823         drvdata->addr_val[idx + 1] = val2;
824         drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
825         drvdata->enable_ctrl1 |= (1 << (idx/2));
826         spin_unlock(&drvdata->spinlock);
827
828         return size;
829 }
830 static DEVICE_ATTR_RW(addr_range);
831
832 static ssize_t addr_start_show(struct device *dev,
833                                struct device_attribute *attr, char *buf)
834 {
835         u8 idx;
836         unsigned long val;
837         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
838
839         spin_lock(&drvdata->spinlock);
840         idx = drvdata->addr_idx;
841         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
842               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
843                 spin_unlock(&drvdata->spinlock);
844                 return -EPERM;
845         }
846
847         val = drvdata->addr_val[idx];
848         spin_unlock(&drvdata->spinlock);
849
850         return sprintf(buf, "%#lx\n", val);
851 }
852
853 static ssize_t addr_start_store(struct device *dev,
854                                 struct device_attribute *attr,
855                                 const char *buf, size_t size)
856 {
857         u8 idx;
858         int ret;
859         unsigned long val;
860         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
861
862         ret = kstrtoul(buf, 16, &val);
863         if (ret)
864                 return ret;
865
866         spin_lock(&drvdata->spinlock);
867         idx = drvdata->addr_idx;
868         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
869               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
870                 spin_unlock(&drvdata->spinlock);
871                 return -EPERM;
872         }
873
874         drvdata->addr_val[idx] = val;
875         drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
876         drvdata->startstop_ctrl |= (1 << idx);
877         drvdata->enable_ctrl1 |= BIT(25);
878         spin_unlock(&drvdata->spinlock);
879
880         return size;
881 }
882 static DEVICE_ATTR_RW(addr_start);
883
884 static ssize_t addr_stop_show(struct device *dev,
885                               struct device_attribute *attr, char *buf)
886 {
887         u8 idx;
888         unsigned long val;
889         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
890
891         spin_lock(&drvdata->spinlock);
892         idx = drvdata->addr_idx;
893         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
894               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
895                 spin_unlock(&drvdata->spinlock);
896                 return -EPERM;
897         }
898
899         val = drvdata->addr_val[idx];
900         spin_unlock(&drvdata->spinlock);
901
902         return sprintf(buf, "%#lx\n", val);
903 }
904
905 static ssize_t addr_stop_store(struct device *dev,
906                                struct device_attribute *attr,
907                                const char *buf, size_t size)
908 {
909         u8 idx;
910         int ret;
911         unsigned long val;
912         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
913
914         ret = kstrtoul(buf, 16, &val);
915         if (ret)
916                 return ret;
917
918         spin_lock(&drvdata->spinlock);
919         idx = drvdata->addr_idx;
920         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
921               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
922                 spin_unlock(&drvdata->spinlock);
923                 return -EPERM;
924         }
925
926         drvdata->addr_val[idx] = val;
927         drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
928         drvdata->startstop_ctrl |= (1 << (idx + 16));
929         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
930         spin_unlock(&drvdata->spinlock);
931
932         return size;
933 }
934 static DEVICE_ATTR_RW(addr_stop);
935
936 static ssize_t addr_acctype_show(struct device *dev,
937                                  struct device_attribute *attr, char *buf)
938 {
939         unsigned long val;
940         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
941
942         spin_lock(&drvdata->spinlock);
943         val = drvdata->addr_acctype[drvdata->addr_idx];
944         spin_unlock(&drvdata->spinlock);
945
946         return sprintf(buf, "%#lx\n", val);
947 }
948
949 static ssize_t addr_acctype_store(struct device *dev,
950                                   struct device_attribute *attr,
951                                   const char *buf, size_t size)
952 {
953         int ret;
954         unsigned long val;
955         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
956
957         ret = kstrtoul(buf, 16, &val);
958         if (ret)
959                 return ret;
960
961         spin_lock(&drvdata->spinlock);
962         drvdata->addr_acctype[drvdata->addr_idx] = val;
963         spin_unlock(&drvdata->spinlock);
964
965         return size;
966 }
967 static DEVICE_ATTR_RW(addr_acctype);
968
969 static ssize_t cntr_idx_show(struct device *dev,
970                              struct device_attribute *attr, char *buf)
971 {
972         unsigned long val;
973         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
974
975         val = drvdata->cntr_idx;
976         return sprintf(buf, "%#lx\n", val);
977 }
978
979 static ssize_t cntr_idx_store(struct device *dev,
980                               struct device_attribute *attr,
981                               const char *buf, size_t size)
982 {
983         int ret;
984         unsigned long val;
985         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
986
987         ret = kstrtoul(buf, 16, &val);
988         if (ret)
989                 return ret;
990
991         if (val >= drvdata->nr_cntr)
992                 return -EINVAL;
993         /*
994          * Use spinlock to ensure index doesn't change while it gets
995          * dereferenced multiple times within a spinlock block elsewhere.
996          */
997         spin_lock(&drvdata->spinlock);
998         drvdata->cntr_idx = val;
999         spin_unlock(&drvdata->spinlock);
1000
1001         return size;
1002 }
1003 static DEVICE_ATTR_RW(cntr_idx);
1004
1005 static ssize_t cntr_rld_val_show(struct device *dev,
1006                                  struct device_attribute *attr, char *buf)
1007 {
1008         unsigned long val;
1009         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1010
1011         spin_lock(&drvdata->spinlock);
1012         val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1013         spin_unlock(&drvdata->spinlock);
1014
1015         return sprintf(buf, "%#lx\n", val);
1016 }
1017
1018 static ssize_t cntr_rld_val_store(struct device *dev,
1019                                   struct device_attribute *attr,
1020                                   const char *buf, size_t size)
1021 {
1022         int ret;
1023         unsigned long val;
1024         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025
1026         ret = kstrtoul(buf, 16, &val);
1027         if (ret)
1028                 return ret;
1029
1030         spin_lock(&drvdata->spinlock);
1031         drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1032         spin_unlock(&drvdata->spinlock);
1033
1034         return size;
1035 }
1036 static DEVICE_ATTR_RW(cntr_rld_val);
1037
1038 static ssize_t cntr_event_show(struct device *dev,
1039                                struct device_attribute *attr, char *buf)
1040 {
1041         unsigned long val;
1042         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1043
1044         spin_lock(&drvdata->spinlock);
1045         val = drvdata->cntr_event[drvdata->cntr_idx];
1046         spin_unlock(&drvdata->spinlock);
1047
1048         return sprintf(buf, "%#lx\n", val);
1049 }
1050
1051 static ssize_t cntr_event_store(struct device *dev,
1052                                 struct device_attribute *attr,
1053                                 const char *buf, size_t size)
1054 {
1055         int ret;
1056         unsigned long val;
1057         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1058
1059         ret = kstrtoul(buf, 16, &val);
1060         if (ret)
1061                 return ret;
1062
1063         spin_lock(&drvdata->spinlock);
1064         drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1065         spin_unlock(&drvdata->spinlock);
1066
1067         return size;
1068 }
1069 static DEVICE_ATTR_RW(cntr_event);
1070
1071 static ssize_t cntr_rld_event_show(struct device *dev,
1072                                    struct device_attribute *attr, char *buf)
1073 {
1074         unsigned long val;
1075         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1076
1077         spin_lock(&drvdata->spinlock);
1078         val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1079         spin_unlock(&drvdata->spinlock);
1080
1081         return sprintf(buf, "%#lx\n", val);
1082 }
1083
1084 static ssize_t cntr_rld_event_store(struct device *dev,
1085                                     struct device_attribute *attr,
1086                                     const char *buf, size_t size)
1087 {
1088         int ret;
1089         unsigned long val;
1090         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1091
1092         ret = kstrtoul(buf, 16, &val);
1093         if (ret)
1094                 return ret;
1095
1096         spin_lock(&drvdata->spinlock);
1097         drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1098         spin_unlock(&drvdata->spinlock);
1099
1100         return size;
1101 }
1102 static DEVICE_ATTR_RW(cntr_rld_event);
1103
1104 static ssize_t cntr_val_show(struct device *dev,
1105                              struct device_attribute *attr, char *buf)
1106 {
1107         int i, ret = 0;
1108         u32 val;
1109         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1110
1111         if (!drvdata->enable) {
1112                 spin_lock(&drvdata->spinlock);
1113                 for (i = 0; i < drvdata->nr_cntr; i++)
1114                         ret += sprintf(buf + ret, "counter %d: %x\n",
1115                                        i, drvdata->cntr_val[i]);
1116                 spin_unlock(&drvdata->spinlock);
1117                 return ret;
1118         }
1119
1120         for (i = 0; i < drvdata->nr_cntr; i++) {
1121                 val = etm_readl(drvdata, ETMCNTVRn(i));
1122                 ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
1123         }
1124
1125         return ret;
1126 }
1127
1128 static ssize_t cntr_val_store(struct device *dev,
1129                               struct device_attribute *attr,
1130                               const char *buf, size_t size)
1131 {
1132         int ret;
1133         unsigned long val;
1134         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1135
1136         ret = kstrtoul(buf, 16, &val);
1137         if (ret)
1138                 return ret;
1139
1140         spin_lock(&drvdata->spinlock);
1141         drvdata->cntr_val[drvdata->cntr_idx] = val;
1142         spin_unlock(&drvdata->spinlock);
1143
1144         return size;
1145 }
1146 static DEVICE_ATTR_RW(cntr_val);
1147
1148 static ssize_t seq_12_event_show(struct device *dev,
1149                                  struct device_attribute *attr, char *buf)
1150 {
1151         unsigned long val;
1152         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1153
1154         val = drvdata->seq_12_event;
1155         return sprintf(buf, "%#lx\n", val);
1156 }
1157
1158 static ssize_t seq_12_event_store(struct device *dev,
1159                                   struct device_attribute *attr,
1160                                   const char *buf, size_t size)
1161 {
1162         int ret;
1163         unsigned long val;
1164         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1165
1166         ret = kstrtoul(buf, 16, &val);
1167         if (ret)
1168                 return ret;
1169
1170         drvdata->seq_12_event = val & ETM_EVENT_MASK;
1171         return size;
1172 }
1173 static DEVICE_ATTR_RW(seq_12_event);
1174
1175 static ssize_t seq_21_event_show(struct device *dev,
1176                                  struct device_attribute *attr, char *buf)
1177 {
1178         unsigned long val;
1179         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1180
1181         val = drvdata->seq_21_event;
1182         return sprintf(buf, "%#lx\n", val);
1183 }
1184
1185 static ssize_t seq_21_event_store(struct device *dev,
1186                                   struct device_attribute *attr,
1187                                   const char *buf, size_t size)
1188 {
1189         int ret;
1190         unsigned long val;
1191         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1192
1193         ret = kstrtoul(buf, 16, &val);
1194         if (ret)
1195                 return ret;
1196
1197         drvdata->seq_21_event = val & ETM_EVENT_MASK;
1198         return size;
1199 }
1200 static DEVICE_ATTR_RW(seq_21_event);
1201
1202 static ssize_t seq_23_event_show(struct device *dev,
1203                                  struct device_attribute *attr, char *buf)
1204 {
1205         unsigned long val;
1206         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1207
1208         val = drvdata->seq_23_event;
1209         return sprintf(buf, "%#lx\n", val);
1210 }
1211
1212 static ssize_t seq_23_event_store(struct device *dev,
1213                                   struct device_attribute *attr,
1214                                   const char *buf, size_t size)
1215 {
1216         int ret;
1217         unsigned long val;
1218         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1219
1220         ret = kstrtoul(buf, 16, &val);
1221         if (ret)
1222                 return ret;
1223
1224         drvdata->seq_23_event = val & ETM_EVENT_MASK;
1225         return size;
1226 }
1227 static DEVICE_ATTR_RW(seq_23_event);
1228
1229 static ssize_t seq_31_event_show(struct device *dev,
1230                                  struct device_attribute *attr, char *buf)
1231 {
1232         unsigned long val;
1233         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1234
1235         val = drvdata->seq_31_event;
1236         return sprintf(buf, "%#lx\n", val);
1237 }
1238
1239 static ssize_t seq_31_event_store(struct device *dev,
1240                                   struct device_attribute *attr,
1241                                   const char *buf, size_t size)
1242 {
1243         int ret;
1244         unsigned long val;
1245         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1246
1247         ret = kstrtoul(buf, 16, &val);
1248         if (ret)
1249                 return ret;
1250
1251         drvdata->seq_31_event = val & ETM_EVENT_MASK;
1252         return size;
1253 }
1254 static DEVICE_ATTR_RW(seq_31_event);
1255
1256 static ssize_t seq_32_event_show(struct device *dev,
1257                                  struct device_attribute *attr, char *buf)
1258 {
1259         unsigned long val;
1260         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1261
1262         val = drvdata->seq_32_event;
1263         return sprintf(buf, "%#lx\n", val);
1264 }
1265
1266 static ssize_t seq_32_event_store(struct device *dev,
1267                                   struct device_attribute *attr,
1268                                   const char *buf, size_t size)
1269 {
1270         int ret;
1271         unsigned long val;
1272         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273
1274         ret = kstrtoul(buf, 16, &val);
1275         if (ret)
1276                 return ret;
1277
1278         drvdata->seq_32_event = val & ETM_EVENT_MASK;
1279         return size;
1280 }
1281 static DEVICE_ATTR_RW(seq_32_event);
1282
1283 static ssize_t seq_13_event_show(struct device *dev,
1284                                  struct device_attribute *attr, char *buf)
1285 {
1286         unsigned long val;
1287         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1288
1289         val = drvdata->seq_13_event;
1290         return sprintf(buf, "%#lx\n", val);
1291 }
1292
1293 static ssize_t seq_13_event_store(struct device *dev,
1294                                   struct device_attribute *attr,
1295                                   const char *buf, size_t size)
1296 {
1297         int ret;
1298         unsigned long val;
1299         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1300
1301         ret = kstrtoul(buf, 16, &val);
1302         if (ret)
1303                 return ret;
1304
1305         drvdata->seq_13_event = val & ETM_EVENT_MASK;
1306         return size;
1307 }
1308 static DEVICE_ATTR_RW(seq_13_event);
1309
1310 static ssize_t seq_curr_state_show(struct device *dev,
1311                                    struct device_attribute *attr, char *buf)
1312 {
1313         unsigned long val, flags;
1314         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1315
1316         if (!drvdata->enable) {
1317                 val = drvdata->seq_curr_state;
1318                 goto out;
1319         }
1320
1321         pm_runtime_get_sync(drvdata->dev);
1322         spin_lock_irqsave(&drvdata->spinlock, flags);
1323
1324         CS_UNLOCK(drvdata->base);
1325         val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1326         CS_LOCK(drvdata->base);
1327
1328         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1329         pm_runtime_put(drvdata->dev);
1330 out:
1331         return sprintf(buf, "%#lx\n", val);
1332 }
1333
1334 static ssize_t seq_curr_state_store(struct device *dev,
1335                                     struct device_attribute *attr,
1336                                     const char *buf, size_t size)
1337 {
1338         int ret;
1339         unsigned long val;
1340         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1341
1342         ret = kstrtoul(buf, 16, &val);
1343         if (ret)
1344                 return ret;
1345
1346         if (val > ETM_SEQ_STATE_MAX_VAL)
1347                 return -EINVAL;
1348
1349         drvdata->seq_curr_state = val;
1350
1351         return size;
1352 }
1353 static DEVICE_ATTR_RW(seq_curr_state);
1354
1355 static ssize_t ctxid_idx_show(struct device *dev,
1356                               struct device_attribute *attr, char *buf)
1357 {
1358         unsigned long val;
1359         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1360
1361         val = drvdata->ctxid_idx;
1362         return sprintf(buf, "%#lx\n", val);
1363 }
1364
1365 static ssize_t ctxid_idx_store(struct device *dev,
1366                                 struct device_attribute *attr,
1367                                 const char *buf, size_t size)
1368 {
1369         int ret;
1370         unsigned long val;
1371         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1372
1373         ret = kstrtoul(buf, 16, &val);
1374         if (ret)
1375                 return ret;
1376
1377         if (val >= drvdata->nr_ctxid_cmp)
1378                 return -EINVAL;
1379
1380         /*
1381          * Use spinlock to ensure index doesn't change while it gets
1382          * dereferenced multiple times within a spinlock block elsewhere.
1383          */
1384         spin_lock(&drvdata->spinlock);
1385         drvdata->ctxid_idx = val;
1386         spin_unlock(&drvdata->spinlock);
1387
1388         return size;
1389 }
1390 static DEVICE_ATTR_RW(ctxid_idx);
1391
1392 static ssize_t ctxid_pid_show(struct device *dev,
1393                               struct device_attribute *attr, char *buf)
1394 {
1395         unsigned long val;
1396         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1397
1398         spin_lock(&drvdata->spinlock);
1399         val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
1400         spin_unlock(&drvdata->spinlock);
1401
1402         return sprintf(buf, "%#lx\n", val);
1403 }
1404
1405 static ssize_t ctxid_pid_store(struct device *dev,
1406                                struct device_attribute *attr,
1407                                const char *buf, size_t size)
1408 {
1409         int ret;
1410         unsigned long vpid, pid;
1411         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1412
1413         ret = kstrtoul(buf, 16, &vpid);
1414         if (ret)
1415                 return ret;
1416
1417         pid = coresight_vpid_to_pid(vpid);
1418
1419         spin_lock(&drvdata->spinlock);
1420         drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
1421         drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
1422         spin_unlock(&drvdata->spinlock);
1423
1424         return size;
1425 }
1426 static DEVICE_ATTR_RW(ctxid_pid);
1427
1428 static ssize_t ctxid_mask_show(struct device *dev,
1429                                struct device_attribute *attr, char *buf)
1430 {
1431         unsigned long val;
1432         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1433
1434         val = drvdata->ctxid_mask;
1435         return sprintf(buf, "%#lx\n", val);
1436 }
1437
1438 static ssize_t ctxid_mask_store(struct device *dev,
1439                                 struct device_attribute *attr,
1440                                 const char *buf, size_t size)
1441 {
1442         int ret;
1443         unsigned long val;
1444         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445
1446         ret = kstrtoul(buf, 16, &val);
1447         if (ret)
1448                 return ret;
1449
1450         drvdata->ctxid_mask = val;
1451         return size;
1452 }
1453 static DEVICE_ATTR_RW(ctxid_mask);
1454
1455 static ssize_t sync_freq_show(struct device *dev,
1456                               struct device_attribute *attr, char *buf)
1457 {
1458         unsigned long val;
1459         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1460
1461         val = drvdata->sync_freq;
1462         return sprintf(buf, "%#lx\n", val);
1463 }
1464
1465 static ssize_t sync_freq_store(struct device *dev,
1466                                struct device_attribute *attr,
1467                                const char *buf, size_t size)
1468 {
1469         int ret;
1470         unsigned long val;
1471         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472
1473         ret = kstrtoul(buf, 16, &val);
1474         if (ret)
1475                 return ret;
1476
1477         drvdata->sync_freq = val & ETM_SYNC_MASK;
1478         return size;
1479 }
1480 static DEVICE_ATTR_RW(sync_freq);
1481
1482 static ssize_t timestamp_event_show(struct device *dev,
1483                                     struct device_attribute *attr, char *buf)
1484 {
1485         unsigned long val;
1486         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1487
1488         val = drvdata->timestamp_event;
1489         return sprintf(buf, "%#lx\n", val);
1490 }
1491
1492 static ssize_t timestamp_event_store(struct device *dev,
1493                                      struct device_attribute *attr,
1494                                      const char *buf, size_t size)
1495 {
1496         int ret;
1497         unsigned long val;
1498         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499
1500         ret = kstrtoul(buf, 16, &val);
1501         if (ret)
1502                 return ret;
1503
1504         drvdata->timestamp_event = val & ETM_EVENT_MASK;
1505         return size;
1506 }
1507 static DEVICE_ATTR_RW(timestamp_event);
1508
1509 static ssize_t status_show(struct device *dev,
1510                            struct device_attribute *attr, char *buf)
1511 {
1512         int ret;
1513         unsigned long flags;
1514         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515
1516         pm_runtime_get_sync(drvdata->dev);
1517         spin_lock_irqsave(&drvdata->spinlock, flags);
1518
1519         CS_UNLOCK(drvdata->base);
1520         ret = sprintf(buf,
1521                       "ETMCCR: 0x%08x\n"
1522                       "ETMCCER: 0x%08x\n"
1523                       "ETMSCR: 0x%08x\n"
1524                       "ETMIDR: 0x%08x\n"
1525                       "ETMCR: 0x%08x\n"
1526                       "ETMTRACEIDR: 0x%08x\n"
1527                       "Enable event: 0x%08x\n"
1528                       "Enable start/stop: 0x%08x\n"
1529                       "Enable control: CR1 0x%08x CR2 0x%08x\n"
1530                       "CPU affinity: %d\n",
1531                       drvdata->etmccr, drvdata->etmccer,
1532                       etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1533                       etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1534                       etm_readl(drvdata, ETMTEEVR),
1535                       etm_readl(drvdata, ETMTSSCR),
1536                       etm_readl(drvdata, ETMTECR1),
1537                       etm_readl(drvdata, ETMTECR2),
1538                       drvdata->cpu);
1539         CS_LOCK(drvdata->base);
1540
1541         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1542         pm_runtime_put(drvdata->dev);
1543
1544         return ret;
1545 }
1546 static DEVICE_ATTR_RO(status);
1547
1548 static ssize_t traceid_show(struct device *dev,
1549                             struct device_attribute *attr, char *buf)
1550 {
1551         unsigned long val, flags;
1552         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1553
1554         if (!drvdata->enable) {
1555                 val = drvdata->traceid;
1556                 goto out;
1557         }
1558
1559         pm_runtime_get_sync(drvdata->dev);
1560         spin_lock_irqsave(&drvdata->spinlock, flags);
1561         CS_UNLOCK(drvdata->base);
1562
1563         val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1564
1565         CS_LOCK(drvdata->base);
1566         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1567         pm_runtime_put(drvdata->dev);
1568 out:
1569         return sprintf(buf, "%#lx\n", val);
1570 }
1571
1572 static ssize_t traceid_store(struct device *dev,
1573                              struct device_attribute *attr,
1574                              const char *buf, size_t size)
1575 {
1576         int ret;
1577         unsigned long val;
1578         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1579
1580         ret = kstrtoul(buf, 16, &val);
1581         if (ret)
1582                 return ret;
1583
1584         drvdata->traceid = val & ETM_TRACEID_MASK;
1585         return size;
1586 }
1587 static DEVICE_ATTR_RW(traceid);
1588
1589 static struct attribute *coresight_etm_attrs[] = {
1590         &dev_attr_nr_addr_cmp.attr,
1591         &dev_attr_nr_cntr.attr,
1592         &dev_attr_nr_ctxid_cmp.attr,
1593         &dev_attr_etmsr.attr,
1594         &dev_attr_reset.attr,
1595         &dev_attr_mode.attr,
1596         &dev_attr_trigger_event.attr,
1597         &dev_attr_enable_event.attr,
1598         &dev_attr_fifofull_level.attr,
1599         &dev_attr_addr_idx.attr,
1600         &dev_attr_addr_single.attr,
1601         &dev_attr_addr_range.attr,
1602         &dev_attr_addr_start.attr,
1603         &dev_attr_addr_stop.attr,
1604         &dev_attr_addr_acctype.attr,
1605         &dev_attr_cntr_idx.attr,
1606         &dev_attr_cntr_rld_val.attr,
1607         &dev_attr_cntr_event.attr,
1608         &dev_attr_cntr_rld_event.attr,
1609         &dev_attr_cntr_val.attr,
1610         &dev_attr_seq_12_event.attr,
1611         &dev_attr_seq_21_event.attr,
1612         &dev_attr_seq_23_event.attr,
1613         &dev_attr_seq_31_event.attr,
1614         &dev_attr_seq_32_event.attr,
1615         &dev_attr_seq_13_event.attr,
1616         &dev_attr_seq_curr_state.attr,
1617         &dev_attr_ctxid_idx.attr,
1618         &dev_attr_ctxid_pid.attr,
1619         &dev_attr_ctxid_mask.attr,
1620         &dev_attr_sync_freq.attr,
1621         &dev_attr_timestamp_event.attr,
1622         &dev_attr_status.attr,
1623         &dev_attr_traceid.attr,
1624         NULL,
1625 };
1626 ATTRIBUTE_GROUPS(coresight_etm);
1627
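/*
 * CPU hotplug callback.  CPU_STARTING runs on the incoming CPU: clear
 * the OS lock if that has not been done yet and re-program the ETM if
 * tracing was enabled.  CPU_ONLINE re-enables the source when tracing
 * was requested at boot but could not be set up earlier.  CPU_DYING
 * disables the hardware before the CPU goes away.
 */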
1628 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1629                             void *hcpu)
1630 {
1631         unsigned int cpu = (unsigned long)hcpu;
1632
1633         if (!etmdrvdata[cpu])
1634                 goto out;
1635
1636         switch (action & (~CPU_TASKS_FROZEN)) {
1637         case CPU_STARTING:
1638                 spin_lock(&etmdrvdata[cpu]->spinlock);
1639                 if (!etmdrvdata[cpu]->os_unlock) {
1640                         etm_os_unlock(etmdrvdata[cpu]);
1641                         etmdrvdata[cpu]->os_unlock = true;
1642                 }
1643
1644                 if (etmdrvdata[cpu]->enable)
1645                         etm_enable_hw(etmdrvdata[cpu]);
1646                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1647                 break;
1648
1649         case CPU_ONLINE:
1650                 if (etmdrvdata[cpu]->boot_enable &&
1651                     !etmdrvdata[cpu]->sticky_enable)
1652                         coresight_enable(etmdrvdata[cpu]->csdev);
1653                 break;
1654
1655         case CPU_DYING:
1656                 spin_lock(&etmdrvdata[cpu]->spinlock);
1657                 if (etmdrvdata[cpu]->enable)
1658                         etm_disable_hw(etmdrvdata[cpu]);
1659                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1660                 break;
1661         }
1662 out:
1663         return NOTIFY_OK;
1664 }
1665
1666 static struct notifier_block etm_cpu_notifier = {
1667         .notifier_call = etm_cpu_callback,
1668 };
1669
1670 static bool etm_arch_supported(u8 arch)
1671 {
1672         switch (arch) {
1673         case ETM_ARCH_V3_3:
1674                 break;
1675         case ETM_ARCH_V3_5:
1676                 break;
1677         case PFT_ARCH_V1_0:
1678                 break;
1679         case PFT_ARCH_V1_1:
1680                 break;
1681         default:
1682                 return false;
1683         }
1684         return true;
1685 }
1686
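/*
 * Discover what this ETM/PTM implementation provides.  Run on the CPU
 * that owns the tracer (via smp_call_function_single() in etm_probe())
 * so the register accesses target the right ETM.  ETMIDR gives the
 * architecture version and ETMCCR the number of comparators, counters
 * and external inputs/outputs.
 */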
1687 static void etm_init_arch_data(void *info)
1688 {
1689         u32 etmidr;
1690         u32 etmccr;
1691         struct etm_drvdata *drvdata = info;
1692
1693         CS_UNLOCK(drvdata->base);
1694
1695         /* First dummy read */
1696         (void)etm_readl(drvdata, ETMPDSR);
1697         /* Provide power to ETM: ETMPDCR[3] == 1 */
1698         etm_set_pwrup(drvdata);
1699         /*
1700          * Clear the power down bit; while it is set, writes to
1701          * certain registers might be ignored.
1702          */
1703         etm_clr_pwrdwn(drvdata);
1704         /*
1705          * Set the prog bit. It should already be set out of reset, but
1706          * set it here to be sure.
1707          */
1708         etm_set_prog(drvdata);
1709
1710         /* Find all capabilities */
1711         etmidr = etm_readl(drvdata, ETMIDR);
1712         drvdata->arch = BMVAL(etmidr, 4, 11);
1713         drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1714
1715         drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1716         etmccr = etm_readl(drvdata, ETMCCR);
1717         drvdata->etmccr = etmccr;
1718         drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1719         drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1720         drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1721         drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1722         drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1723
1724         etm_set_pwrdwn(drvdata);
1725         etm_clr_pwrup(drvdata);
1726         CS_LOCK(drvdata->base);
1727 }
1728
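/*
 * Establish a sane initial configuration: cycle-accurate tracing with
 * timestamps and, when at least one address range comparator pair is
 * available, an address range covering the kernel text section
 * (_stext to _etext).
 */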
1729 static void etm_init_default_data(struct etm_drvdata *drvdata)
1730 {
1731         /*
1732          * A trace ID of value 0 is invalid, so start at an arbitrary
1733          * non-zero value that fits in 7 bits.
1734          */
1735         static int etm3x_traceid = 0x10;
1736
1737         u32 flags = (1 << 0 | /* instruction execute */
1738                      3 << 3 | /* ARM instruction */
1739                      0 << 5 | /* No data value comparison */
1740                      0 << 7 | /* No exact match */
1741                      0 << 8 | /* Ignore context ID */
1742                      0 << 10); /* Security ignored */
1743
1744         /*
1745          * Initial configuration only - guarantees that sources handled
1746          * by this driver have a unique ID at startup, but not across
1747          * other types of sources.  For that we rely on the core
1748          * framework.
1749          */
1750         drvdata->traceid = etm3x_traceid++;
1751         drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1752         drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
1753         if (drvdata->nr_addr_cmp >= 2) {
1754                 drvdata->addr_val[0] = (u32) _stext;
1755                 drvdata->addr_val[1] = (u32) _etext;
1756                 drvdata->addr_acctype[0] = flags;
1757                 drvdata->addr_acctype[1] = flags;
1758                 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1759                 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1760         }
1761
1762         etm_set_default(drvdata);
1763 }
1764
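/*
 * Probe an ETM/PTM AMBA device: map its registers, read the
 * implementation's capabilities on the CPU it is bound to, register a
 * hotplug notifier for the first instance and finally register the
 * device with the coresight core.
 */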
1765 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1766 {
1767         int ret;
1768         void __iomem *base;
1769         struct device *dev = &adev->dev;
1770         struct coresight_platform_data *pdata = NULL;
1771         struct etm_drvdata *drvdata;
1772         struct resource *res = &adev->res;
1773         struct coresight_desc *desc;
1774         struct device_node *np = adev->dev.of_node;
1775
1776         desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1777         if (!desc)
1778                 return -ENOMEM;
1779
1780         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1781         if (!drvdata)
1782                 return -ENOMEM;
1783
1784         if (np) {
1785                 pdata = of_get_coresight_platform_data(dev, np);
1786                 if (IS_ERR(pdata))
1787                         return PTR_ERR(pdata);
1788
1789                 adev->dev.platform_data = pdata;
1790                 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1791         }
1792
1793         drvdata->dev = &adev->dev;
1794         dev_set_drvdata(dev, drvdata);
1795
1796         /* Validity of the resource is already checked by the AMBA core */
1797         base = devm_ioremap_resource(dev, res);
1798         if (IS_ERR(base))
1799                 return PTR_ERR(base);
1800
1801         drvdata->base = base;
1802
1803         spin_lock_init(&drvdata->spinlock);
1804
1805         drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
1806         if (!IS_ERR(drvdata->atclk)) {
1807                 ret = clk_prepare_enable(drvdata->atclk);
1808                 if (ret)
1809                         return ret;
1810         }
1811
1812         drvdata->cpu = pdata ? pdata->cpu : 0;
1813
1814         get_online_cpus();
1815         etmdrvdata[drvdata->cpu] = drvdata;
1816
1817         if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1818                 drvdata->os_unlock = true;
1819
1820         if (smp_call_function_single(drvdata->cpu,
1821                                      etm_init_arch_data,  drvdata, 1))
1822                 dev_err(dev, "ETM arch init failed\n");
1823
1824         if (!etm_count++)
1825                 register_hotcpu_notifier(&etm_cpu_notifier);
1826
1827         put_online_cpus();
1828
1829         if (!etm_arch_supported(drvdata->arch)) {
1830                 ret = -EINVAL;
1831                 goto err_arch_supported;
1832         }
1833         etm_init_default_data(drvdata);
1834
1835         desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1836         desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1837         desc->ops = &etm_cs_ops;
1838         desc->pdata = pdata;
1839         desc->dev = dev;
1840         desc->groups = coresight_etm_groups;
1841         drvdata->csdev = coresight_register(desc);
1842         if (IS_ERR(drvdata->csdev)) {
1843                 ret = PTR_ERR(drvdata->csdev);
1844                 goto err_arch_supported;
1845         }
1846
1847         pm_runtime_put(&adev->dev);
1848         dev_info(dev, "%s initialized\n", (char *)id->data);
1849
1850         if (boot_enable) {
1851                 coresight_enable(drvdata->csdev);
1852                 drvdata->boot_enable = true;
1853         }
1854
1855         return 0;
1856
1857 err_arch_supported:
1858         if (--etm_count == 0)
1859                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1860         return ret;
1861 }
1862
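/*
 * Undo etm_probe(): unregister from the coresight core and drop the
 * hotplug notifier when the last ETM/PTM instance goes away.
 */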
1863 static int etm_remove(struct amba_device *adev)
1864 {
1865         struct etm_drvdata *drvdata = amba_get_drvdata(adev);
1866
1867         coresight_unregister(drvdata->csdev);
1868         if (--etm_count == 0)
1869                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1870
1871         return 0;
1872 }
1873
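/*
 * Runtime PM callbacks only gate the optional trace clock (atclk)
 * obtained in etm_probe(); nothing else needs to be saved or restored
 * here.
 */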
1874 #ifdef CONFIG_PM
1875 static int etm_runtime_suspend(struct device *dev)
1876 {
1877         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1878
1879         if (drvdata && !IS_ERR(drvdata->atclk))
1880                 clk_disable_unprepare(drvdata->atclk);
1881
1882         return 0;
1883 }
1884
1885 static int etm_runtime_resume(struct device *dev)
1886 {
1887         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1888
1889         if (drvdata && !IS_ERR(drvdata->atclk))
1890                 clk_prepare_enable(drvdata->atclk);
1891
1892         return 0;
1893 }
1894 #endif
1895
1896 static const struct dev_pm_ops etm_dev_pm_ops = {
1897         SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
1898 };
1899
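/*
 * AMBA peripheral IDs of the supported ETM and PTM implementations.
 * The .data string is only used for the probe-time log message.
 */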
1900 static struct amba_id etm_ids[] = {
1901         {       /* ETM 3.3 */
1902                 .id     = 0x0003b921,
1903                 .mask   = 0x0003ffff,
1904                 .data   = "ETM 3.3",
1905         },
1906         {       /* ETM 3.5 */
1907                 .id     = 0x0003b956,
1908                 .mask   = 0x0003ffff,
1909                 .data   = "ETM 3.5",
1910         },
1911         {       /* PTM 1.0 */
1912                 .id     = 0x0003b950,
1913                 .mask   = 0x0003ffff,
1914                 .data   = "PTM 1.0",
1915         },
1916         {       /* PTM 1.1 */
1917                 .id     = 0x0003b95f,
1918                 .mask   = 0x0003ffff,
1919                 .data   = "PTM 1.1",
1920         },
1921         {       /* PTM 1.1 Qualcomm */
1922                 .id     = 0x0003006f,
1923                 .mask   = 0x0003ffff,
1924                 .data   = "PTM 1.1",
1925         },
1926         { 0, 0},
1927 };
1928
1929 static struct amba_driver etm_driver = {
1930         .drv = {
1931                 .name   = "coresight-etm3x",
1932                 .owner  = THIS_MODULE,
1933                 .pm     = &etm_dev_pm_ops,
1934         },
1935         .probe          = etm_probe,
1936         .remove         = etm_remove,
1937         .id_table       = etm_ids,
1938 };
1939
1940 module_amba_driver(etm_driver);
1941
1942 MODULE_LICENSE("GPL v2");
1943 MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");