1 /*
2  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
16  *
17  */
18 #include <linux/kernel.h>
19 #include <linux/blkdev.h>
20 #include <linux/blktrace_api.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/time.h>
28 #include <linux/uaccess.h>
29 #include <linux/list.h>
30
31 #include <trace/events/block.h>
32
33 #include "trace_output.h"
34
35 #ifdef CONFIG_BLK_DEV_IO_TRACE
36
37 static unsigned int blktrace_seq __read_mostly = 1;
38
39 static struct trace_array *blk_tr;
40 static bool blk_tracer_enabled __read_mostly;
41
42 static LIST_HEAD(running_trace_list);
43 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
44
45 /* Select an alternative, minimalistic output instead of the original one */
46 #define TRACE_BLK_OPT_CLASSIC   0x1
47
48 static struct tracer_opt blk_tracer_opts[] = {
49         /* The minimalistic output is disabled by default */
50         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
51         { }
52 };
53
54 static struct tracer_flags blk_tracer_flags = {
55         .val  = 0,
56         .opts = blk_tracer_opts,
57 };
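/*
 * When set, TRACE_BLK_OPT_CLASSIC selects the classic blktrace-style
 * one-line output (see blk_log_action_classic() below) instead of the
 * default ftrace-formatted output.
 */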
58
59 /* Global reference count of probes */
60 static atomic_t blk_probes_ref = ATOMIC_INIT(0);
61
62 static void blk_register_tracepoints(void);
63 static void blk_unregister_tracepoints(void);
64
65 /*
66  * Send out a notify message.
67  */
68 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
69                        const void *data, size_t len)
70 {
71         struct blk_io_trace *t;
72         struct ring_buffer_event *event = NULL;
73         struct ring_buffer *buffer = NULL;
74         int pc = 0;
75         int cpu = smp_processor_id();
76         bool blk_tracer = blk_tracer_enabled;
77
78         if (blk_tracer) {
79                 buffer = blk_tr->trace_buffer.buffer;
80                 pc = preempt_count();
81                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
82                                                   sizeof(*t) + len,
83                                                   0, pc);
84                 if (!event)
85                         return;
86                 t = ring_buffer_event_data(event);
87                 goto record_it;
88         }
89
90         if (!bt->rchan)
91                 return;
92
93         t = relay_reserve(bt->rchan, sizeof(*t) + len);
94         if (t) {
95                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
96                 t->time = ktime_to_ns(ktime_get());
97 record_it:
98                 t->device = bt->dev;
99                 t->action = action;
100                 t->pid = pid;
101                 t->cpu = cpu;
102                 t->pdu_len = len;
103                 memcpy((void *) t + sizeof(*t), data, len);
104
105                 if (blk_tracer)
106                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
107         }
108 }
109
110 /*
111  * Send out a notify for this process, if we haven't done so since a trace
112  * started
113  */
114 static void trace_note_tsk(struct task_struct *tsk)
115 {
116         unsigned long flags;
117         struct blk_trace *bt;
118
119         tsk->btrace_seq = blktrace_seq;
120         spin_lock_irqsave(&running_trace_lock, flags);
121         list_for_each_entry(bt, &running_trace_list, running_list) {
122                 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
123                            sizeof(tsk->comm));
124         }
125         spin_unlock_irqrestore(&running_trace_lock, flags);
126 }
127
128 static void trace_note_time(struct blk_trace *bt)
129 {
130         struct timespec64 now;
131         unsigned long flags;
132         u32 words[2];
133
134         /* need to check user space to see if this breaks in y2038 or y2106 */
135         ktime_get_real_ts64(&now);
136         words[0] = (u32)now.tv_sec;
137         words[1] = now.tv_nsec;
138
139         local_irq_save(flags);
140         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
141         local_irq_restore(flags);
142 }
143
144 void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
145 {
146         int n;
147         va_list args;
148         unsigned long flags;
149         char *buf;
150
151         if (unlikely(bt->trace_state != Blktrace_running &&
152                      !blk_tracer_enabled))
153                 return;
154
155         /*
156          * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
157          * message to the trace.
158          */
159         if (!(bt->act_mask & BLK_TC_NOTIFY))
160                 return;
161
162         local_irq_save(flags);
163         buf = this_cpu_ptr(bt->msg_data);
164         va_start(args, fmt);
165         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
166         va_end(args);
167
168         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
169         local_irq_restore(flags);
170 }
171 EXPORT_SYMBOL_GPL(__trace_note_message);
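/*
 * Illustrative sketch (not part of this file's logic): a block driver would
 * normally emit a free-form note through the blk_add_trace_msg() wrapper from
 * <linux/blktrace_api.h>, which calls __trace_note_message() when a trace is
 * attached to the queue. The driver function and variable names below are
 * invented for the example.
 *
 *	static void mydrv_note_remap(struct request_queue *q, sector_t old,
 *				     sector_t new)
 *	{
 *		blk_add_trace_msg(q, "mydrv: remapped %llu -> %llu",
 *				  (unsigned long long)old,
 *				  (unsigned long long)new);
 *	}
 *
 * The message shows up in the trace stream as a notify ('N') event.
 */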
172
173 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
174                          pid_t pid)
175 {
176         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
177                 return 1;
178         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
179                 return 1;
180         if (bt->pid && pid != bt->pid)
181                 return 1;
182
183         return 0;
184 }
185
186 /*
187  * Data direction bit lookup
188  */
189 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
190                                  BLK_TC_ACT(BLK_TC_WRITE) };
191
192 #define BLK_TC_RAHEAD           BLK_TC_AHEAD
193 #define BLK_TC_PREFLUSH         BLK_TC_FLUSH
194
195 /* The ilog2() calls fall out because they're constant */
196 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
197           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
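/*
 * For example, MASK_TC_BIT(op_flags, SYNC) expands to
 *
 *	(op_flags & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * Since REQ_SYNC is the single bit at position __REQ_SYNC, the shift moves
 * that bit up to position ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT, i.e. exactly the
 * BLK_TC_SYNC category bit in the upper half of the 'what' action word.
 */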
198
199 /*
200  * The worker for the various blk_add_trace*() types. Fills out a
201  * blk_io_trace structure and places it in a per-cpu subbuffer.
202  */
203 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
204                      int op, int op_flags, u32 what, int error, int pdu_len,
205                      void *pdu_data)
206 {
207         struct task_struct *tsk = current;
208         struct ring_buffer_event *event = NULL;
209         struct ring_buffer *buffer = NULL;
210         struct blk_io_trace *t;
211         unsigned long flags = 0;
212         unsigned long *sequence;
213         pid_t pid;
214         int cpu, pc = 0;
215         bool blk_tracer = blk_tracer_enabled;
216
217         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
218                 return;
219
220         what |= ddir_act[op_is_write(op) ? WRITE : READ];
221         what |= MASK_TC_BIT(op_flags, SYNC);
222         what |= MASK_TC_BIT(op_flags, RAHEAD);
223         what |= MASK_TC_BIT(op_flags, META);
224         what |= MASK_TC_BIT(op_flags, PREFLUSH);
225         what |= MASK_TC_BIT(op_flags, FUA);
226         if (op == REQ_OP_DISCARD)
227                 what |= BLK_TC_ACT(BLK_TC_DISCARD);
228         if (op == REQ_OP_FLUSH)
229                 what |= BLK_TC_ACT(BLK_TC_FLUSH);
230
231         pid = tsk->pid;
232         if (act_log_check(bt, what, sector, pid))
233                 return;
234         cpu = raw_smp_processor_id();
235
236         if (blk_tracer) {
237                 tracing_record_cmdline(current);
238
239                 buffer = blk_tr->trace_buffer.buffer;
240                 pc = preempt_count();
241                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
242                                                   sizeof(*t) + pdu_len,
243                                                   0, pc);
244                 if (!event)
245                         return;
246                 t = ring_buffer_event_data(event);
247                 goto record_it;
248         }
249
250         if (unlikely(tsk->btrace_seq != blktrace_seq))
251                 trace_note_tsk(tsk);
252
253         /*
254          * A word about the locking here - we disable interrupts to reserve
255          * some space in the relay per-cpu buffer, to prevent an irq
256          * from coming in and stepping on our toes.
257          */
258         local_irq_save(flags);
259         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
260         if (t) {
261                 sequence = per_cpu_ptr(bt->sequence, cpu);
262
263                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
264                 t->sequence = ++(*sequence);
265                 t->time = ktime_to_ns(ktime_get());
266 record_it:
267                 /*
268                  * These two are not needed in ftrace, as they are already in
269                  * the generic trace_entry and filled by
270                  * tracing_generic_entry_update, but we fill them in here too
271                  * for the benefit of the trace_event->bin() synthesizer.
272                  */
273                 t->cpu = cpu;
274                 t->pid = pid;
275
276                 t->sector = sector;
277                 t->bytes = bytes;
278                 t->action = what;
279                 t->device = bt->dev;
280                 t->error = error;
281                 t->pdu_len = pdu_len;
282
283                 if (pdu_len)
284                         memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
285
286                 if (blk_tracer) {
287                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
288                         return;
289                 }
290         }
291
292         local_irq_restore(flags);
293 }
294
295 static struct dentry *blk_tree_root;
296 static DEFINE_MUTEX(blk_tree_mutex);
297
298 static void blk_trace_free(struct blk_trace *bt)
299 {
300         debugfs_remove(bt->msg_file);
301         debugfs_remove(bt->dropped_file);
302         relay_close(bt->rchan);
303         debugfs_remove(bt->dir);
304         free_percpu(bt->sequence);
305         free_percpu(bt->msg_data);
306         kfree(bt);
307 }
308
309 static void blk_trace_cleanup(struct blk_trace *bt)
310 {
311         blk_trace_free(bt);
312         if (atomic_dec_and_test(&blk_probes_ref))
313                 blk_unregister_tracepoints();
314 }
315
316 int blk_trace_remove(struct request_queue *q)
317 {
318         struct blk_trace *bt;
319
320         bt = xchg(&q->blk_trace, NULL);
321         if (!bt)
322                 return -EINVAL;
323
324         if (bt->trace_state != Blktrace_running)
325                 blk_trace_cleanup(bt);
326
327         return 0;
328 }
329 EXPORT_SYMBOL_GPL(blk_trace_remove);
330
331 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
332                                 size_t count, loff_t *ppos)
333 {
334         struct blk_trace *bt = filp->private_data;
335         char buf[16];
336
337         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
338
339         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
340 }
341
342 static const struct file_operations blk_dropped_fops = {
343         .owner =        THIS_MODULE,
344         .open =         simple_open,
345         .read =         blk_dropped_read,
346         .llseek =       default_llseek,
347 };
348
349 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
350                                 size_t count, loff_t *ppos)
351 {
352         char *msg;
353         struct blk_trace *bt;
354
355         if (count >= BLK_TN_MAX_MSG)
356                 return -EINVAL;
357
358         msg = memdup_user_nul(buffer, count);
359         if (IS_ERR(msg))
360                 return PTR_ERR(msg);
361
362         bt = filp->private_data;
363         __trace_note_message(bt, "%s", msg);
364         kfree(msg);
365
366         return count;
367 }
368
369 static const struct file_operations blk_msg_fops = {
370         .owner =        THIS_MODULE,
371         .open =         simple_open,
372         .write =        blk_msg_write,
373         .llseek =       noop_llseek,
374 };
375
376 /*
377  * Keep track of how many times we encountered a full subbuffer, to aid
378  * the user space app in telling how many events were lost.
379  */
380 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
381                                      void *prev_subbuf, size_t prev_padding)
382 {
383         struct blk_trace *bt;
384
385         if (!relay_buf_full(buf))
386                 return 1;
387
388         bt = buf->chan->private_data;
389         atomic_inc(&bt->dropped);
390         return 0;
391 }
392
393 static int blk_remove_buf_file_callback(struct dentry *dentry)
394 {
395         debugfs_remove(dentry);
396
397         return 0;
398 }
399
400 static struct dentry *blk_create_buf_file_callback(const char *filename,
401                                                    struct dentry *parent,
402                                                    umode_t mode,
403                                                    struct rchan_buf *buf,
404                                                    int *is_global)
405 {
406         return debugfs_create_file(filename, mode, parent, buf,
407                                         &relay_file_operations);
408 }
409
410 static struct rchan_callbacks blk_relay_callbacks = {
411         .subbuf_start           = blk_subbuf_start_callback,
412         .create_buf_file        = blk_create_buf_file_callback,
413         .remove_buf_file        = blk_remove_buf_file_callback,
414 };
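/*
 * relay_open("trace", dir, ...) in do_blk_trace_setup() below calls
 * blk_create_buf_file_callback() once per CPU, so the per-cpu data appears in
 * debugfs as block/<name>/trace0, trace1, ... which is what the blktrace
 * utility reads (layout described here for orientation; relay_open() is the
 * authoritative reference).
 */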
415
416 static void blk_trace_setup_lba(struct blk_trace *bt,
417                                 struct block_device *bdev)
418 {
419         struct hd_struct *part = NULL;
420
421         if (bdev)
422                 part = bdev->bd_part;
423
424         if (part) {
425                 bt->start_lba = part->start_sect;
426                 bt->end_lba = part->start_sect + part->nr_sects;
427         } else {
428                 bt->start_lba = 0;
429                 bt->end_lba = -1ULL;
430         }
431 }
432
433 /*
434  * Set up everything required to start tracing
435  */
436 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
437                        struct block_device *bdev,
438                        struct blk_user_trace_setup *buts)
439 {
440         struct blk_trace *bt = NULL;
441         struct dentry *dir = NULL;
442         int ret;
443
444         if (!buts->buf_size || !buts->buf_nr)
445                 return -EINVAL;
446
447         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
448         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
449
450         /*
451          * some device names contain slashes in their paths - convert them
452          * to underscores so the debugfs directory name works as expected
453          */
454         strreplace(buts->name, '/', '_');
455
456         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
457         if (!bt)
458                 return -ENOMEM;
459
460         ret = -ENOMEM;
461         bt->sequence = alloc_percpu(unsigned long);
462         if (!bt->sequence)
463                 goto err;
464
465         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
466         if (!bt->msg_data)
467                 goto err;
468
469         ret = -ENOENT;
470
471         mutex_lock(&blk_tree_mutex);
472         if (!blk_tree_root) {
473                 blk_tree_root = debugfs_create_dir("block", NULL);
474                 if (!blk_tree_root) {
475                         mutex_unlock(&blk_tree_mutex);
476                         goto err;
477                 }
478         }
479         mutex_unlock(&blk_tree_mutex);
480
481         dir = debugfs_create_dir(buts->name, blk_tree_root);
482
483         if (!dir)
484                 goto err;
485
486         bt->dir = dir;
487         bt->dev = dev;
488         atomic_set(&bt->dropped, 0);
489         INIT_LIST_HEAD(&bt->running_list);
490
491         ret = -EIO;
492         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
493                                                &blk_dropped_fops);
494         if (!bt->dropped_file)
495                 goto err;
496
497         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
498         if (!bt->msg_file)
499                 goto err;
500
501         bt->rchan = relay_open("trace", dir, buts->buf_size,
502                                 buts->buf_nr, &blk_relay_callbacks, bt);
503         if (!bt->rchan)
504                 goto err;
505
506         bt->act_mask = buts->act_mask;
507         if (!bt->act_mask)
508                 bt->act_mask = (u16) -1;
509
510         blk_trace_setup_lba(bt, bdev);
511
512         /* overwrite with user settings */
513         if (buts->start_lba)
514                 bt->start_lba = buts->start_lba;
515         if (buts->end_lba)
516                 bt->end_lba = buts->end_lba;
517
518         bt->pid = buts->pid;
519         bt->trace_state = Blktrace_setup;
520
521         ret = -EBUSY;
522         if (cmpxchg(&q->blk_trace, NULL, bt))
523                 goto err;
524
525         if (atomic_inc_return(&blk_probes_ref) == 1)
526                 blk_register_tracepoints();
527
528         return 0;
529 err:
530         blk_trace_free(bt);
531         return ret;
532 }
533
534 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
535                     struct block_device *bdev,
536                     char __user *arg)
537 {
538         struct blk_user_trace_setup buts;
539         int ret;
540
541         ret = copy_from_user(&buts, arg, sizeof(buts));
542         if (ret)
543                 return -EFAULT;
544
545         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
546         if (ret)
547                 return ret;
548
549         if (copy_to_user(arg, &buts, sizeof(buts))) {
550                 blk_trace_remove(q);
551                 return -EFAULT;
552         }
553         return 0;
554 }
555 EXPORT_SYMBOL_GPL(blk_trace_setup);
556
557 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
558 static int compat_blk_trace_setup(struct request_queue *q, char *name,
559                                   dev_t dev, struct block_device *bdev,
560                                   char __user *arg)
561 {
562         struct blk_user_trace_setup buts;
563         struct compat_blk_user_trace_setup cbuts;
564         int ret;
565
566         if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
567                 return -EFAULT;
568
569         buts = (struct blk_user_trace_setup) {
570                 .act_mask = cbuts.act_mask,
571                 .buf_size = cbuts.buf_size,
572                 .buf_nr = cbuts.buf_nr,
573                 .start_lba = cbuts.start_lba,
574                 .end_lba = cbuts.end_lba,
575                 .pid = cbuts.pid,
576         };
577
578         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
579         if (ret)
580                 return ret;
581
582         if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
583                 blk_trace_remove(q);
584                 return -EFAULT;
585         }
586
587         return 0;
588 }
589 #endif
590
591 int blk_trace_startstop(struct request_queue *q, int start)
592 {
593         int ret;
594         struct blk_trace *bt = q->blk_trace;
595
596         if (bt == NULL)
597                 return -EINVAL;
598
599         /*
600          * For starting a trace, we can transition from a setup or stopped
601          * trace. For stopping a trace, the state must be running.
602          */
603         ret = -EINVAL;
604         if (start) {
605                 if (bt->trace_state == Blktrace_setup ||
606                     bt->trace_state == Blktrace_stopped) {
607                         blktrace_seq++;
608                         smp_mb();
609                         bt->trace_state = Blktrace_running;
610                         spin_lock_irq(&running_trace_lock);
611                         list_add(&bt->running_list, &running_trace_list);
612                         spin_unlock_irq(&running_trace_lock);
613
614                         trace_note_time(bt);
615                         ret = 0;
616                 }
617         } else {
618                 if (bt->trace_state == Blktrace_running) {
619                         bt->trace_state = Blktrace_stopped;
620                         spin_lock_irq(&running_trace_lock);
621                         list_del_init(&bt->running_list);
622                         spin_unlock_irq(&running_trace_lock);
623                         relay_flush(bt->rchan);
624                         ret = 0;
625                 }
626         }
627
628         return ret;
629 }
630 EXPORT_SYMBOL_GPL(blk_trace_startstop);
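/*
 * Sketch of the trace_state transitions driven by blk_trace_startstop():
 *
 *	Blktrace_setup   --start--> Blktrace_running
 *	Blktrace_running --stop---> Blktrace_stopped
 *	Blktrace_stopped --start--> Blktrace_running
 *
 * Any other transition is rejected with -EINVAL.
 */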
631
632 /**
633  * blk_trace_ioctl - handle the ioctls associated with tracing
634  * @bdev:       the block device
635  * @cmd:        the ioctl cmd
636  * @arg:        the argument data, if any
637  *
638  **/
639 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
640 {
641         struct request_queue *q;
642         int ret, start = 0;
643         char b[BDEVNAME_SIZE];
644
645         q = bdev_get_queue(bdev);
646         if (!q)
647                 return -ENXIO;
648
649         mutex_lock(&bdev->bd_mutex);
650
651         switch (cmd) {
652         case BLKTRACESETUP:
653                 bdevname(bdev, b);
654                 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
655                 break;
656 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
657         case BLKTRACESETUP32:
658                 bdevname(bdev, b);
659                 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
660                 break;
661 #endif
662         case BLKTRACESTART:
663                 start = 1;
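                /* fall through to start the trace */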
664         case BLKTRACESTOP:
665                 ret = blk_trace_startstop(q, start);
666                 break;
667         case BLKTRACETEARDOWN:
668                 ret = blk_trace_remove(q);
669                 break;
670         default:
671                 ret = -ENOTTY;
672                 break;
673         }
674
675         mutex_unlock(&bdev->bd_mutex);
676         return ret;
677 }
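/*
 * Illustrative user space sequence (a sketch, not kernel code): the blktrace
 * utility drives the ioctls above roughly like this. Error handling is
 * omitted and the buffer sizes are arbitrary.
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	// bytes per relay sub-buffer
 *		.buf_nr   = 4,		// sub-buffers per cpu
 *		.act_mask = 0,		// 0 means "trace everything"
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);   // buts.name is filled in on return
 *	ioctl(fd, BLKTRACESTART);          // per-cpu data is now readable from
 *	                                   // debugfs: block/<buts.name>/trace<cpu>
 *	...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */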
678
679 /**
680  * blk_trace_shutdown - stop and clean up trace structures
681  * @q:    the request queue associated with the device
682  *
683  **/
684 void blk_trace_shutdown(struct request_queue *q)
685 {
686         if (q->blk_trace) {
687                 blk_trace_startstop(q, 0);
688                 blk_trace_remove(q);
689         }
690 }
691
692 /*
693  * blktrace probes
694  */
695
696 /**
697  * blk_add_trace_rq - Add a trace for a request oriented action
698  * @q:          queue the io is for
699  * @rq:         the source request
700  * @nr_bytes:   number of completed bytes
701  * @what:       the action
702  *
703  * Description:
704  *     Records an action against a request. Will log the request offset + size.
705  *
706  **/
707 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
708                              unsigned int nr_bytes, u32 what)
709 {
710         struct blk_trace *bt = q->blk_trace;
711
712         if (likely(!bt))
713                 return;
714
715         if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
716                 what |= BLK_TC_ACT(BLK_TC_PC);
717                 __blk_add_trace(bt, 0, nr_bytes, req_op(rq), rq->cmd_flags,
718                                 what, rq->errors, rq->cmd_len, rq->cmd);
719         } else  {
720                 what |= BLK_TC_ACT(BLK_TC_FS);
721                 __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, req_op(rq),
722                                 rq->cmd_flags, what, rq->errors, 0, NULL);
723         }
724 }
725
726 static void blk_add_trace_rq_abort(void *ignore,
727                                    struct request_queue *q, struct request *rq)
728 {
729         blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
730 }
731
732 static void blk_add_trace_rq_insert(void *ignore,
733                                     struct request_queue *q, struct request *rq)
734 {
735         blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
736 }
737
738 static void blk_add_trace_rq_issue(void *ignore,
739                                    struct request_queue *q, struct request *rq)
740 {
741         blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
742 }
743
744 static void blk_add_trace_rq_requeue(void *ignore,
745                                      struct request_queue *q,
746                                      struct request *rq)
747 {
748         blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
749 }
750
751 static void blk_add_trace_rq_complete(void *ignore,
752                                       struct request_queue *q,
753                                       struct request *rq,
754                                       unsigned int nr_bytes)
755 {
756         blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
757 }
758
759 /**
760  * blk_add_trace_bio - Add a trace for a bio oriented action
761  * @q:          queue the io is for
762  * @bio:        the source bio
763  * @what:       the action
764  * @error:      error, if any
765  *
766  * Description:
767  *     Records an action against a bio. Will log the bio offset + size.
768  *
769  **/
770 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
771                               u32 what, int error)
772 {
773         struct blk_trace *bt = q->blk_trace;
774
775         if (likely(!bt))
776                 return;
777
778         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
779                         bio_op(bio), bio->bi_rw, what, error, 0, NULL);
780 }
781
782 static void blk_add_trace_bio_bounce(void *ignore,
783                                      struct request_queue *q, struct bio *bio)
784 {
785         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
786 }
787
788 static void blk_add_trace_bio_complete(void *ignore,
789                                        struct request_queue *q, struct bio *bio,
790                                        int error)
791 {
792         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
793 }
794
795 static void blk_add_trace_bio_backmerge(void *ignore,
796                                         struct request_queue *q,
797                                         struct request *rq,
798                                         struct bio *bio)
799 {
800         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
801 }
802
803 static void blk_add_trace_bio_frontmerge(void *ignore,
804                                          struct request_queue *q,
805                                          struct request *rq,
806                                          struct bio *bio)
807 {
808         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
809 }
810
811 static void blk_add_trace_bio_queue(void *ignore,
812                                     struct request_queue *q, struct bio *bio)
813 {
814         blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
815 }
816
817 static void blk_add_trace_getrq(void *ignore,
818                                 struct request_queue *q,
819                                 struct bio *bio, int rw)
820 {
821         if (bio)
822                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
823         else {
824                 struct blk_trace *bt = q->blk_trace;
825
826                 if (bt)
827                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
828                                         NULL);
829         }
830 }
831
832
833 static void blk_add_trace_sleeprq(void *ignore,
834                                   struct request_queue *q,
835                                   struct bio *bio, int rw)
836 {
837         if (bio)
838                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
839         else {
840                 struct blk_trace *bt = q->blk_trace;
841
842                 if (bt)
843                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
844                                         0, 0, NULL);
845         }
846 }
847
848 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
849 {
850         struct blk_trace *bt = q->blk_trace;
851
852         if (bt)
853                 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
854 }
855
856 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
857                                     unsigned int depth, bool explicit)
858 {
859         struct blk_trace *bt = q->blk_trace;
860
861         if (bt) {
862                 __be64 rpdu = cpu_to_be64(depth);
863                 u32 what;
864
865                 if (explicit)
866                         what = BLK_TA_UNPLUG_IO;
867                 else
868                         what = BLK_TA_UNPLUG_TIMER;
869
870                 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
871         }
872 }
873
874 static void blk_add_trace_split(void *ignore,
875                                 struct request_queue *q, struct bio *bio,
876                                 unsigned int pdu)
877 {
878         struct blk_trace *bt = q->blk_trace;
879
880         if (bt) {
881                 __be64 rpdu = cpu_to_be64(pdu);
882
883                 __blk_add_trace(bt, bio->bi_iter.bi_sector,
884                                 bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
885                                 BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
886                                 &rpdu);
887         }
888 }
889
890 /**
891  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
892  * @ignore:     trace callback data parameter (not used)
893  * @q:          queue the io is for
894  * @bio:        the source bio
895  * @dev:        target device
896  * @from:       source sector
897  *
898  * Description:
899  *     A device-mapper or raid target sometimes needs to split a bio because
900  *     it spans a stripe (or similar). Add a trace for that action.
901  *
902  **/
903 static void blk_add_trace_bio_remap(void *ignore,
904                                     struct request_queue *q, struct bio *bio,
905                                     dev_t dev, sector_t from)
906 {
907         struct blk_trace *bt = q->blk_trace;
908         struct blk_io_trace_remap r;
909
910         if (likely(!bt))
911                 return;
912
913         r.device_from = cpu_to_be32(dev);
914         r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
915         r.sector_from = cpu_to_be64(from);
916
917         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
918                         bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
919                         sizeof(r), &r);
920 }
921
922 /**
923  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
924  * @ignore:     trace callback data parameter (not used)
925  * @q:          queue the io is for
926  * @rq:         the source request
927  * @dev:        target device
928  * @from:       source sector
929  *
930  * Description:
931  *     Device-mapper remaps requests to other devices.
932  *     Add a trace for that action.
933  *
934  **/
935 static void blk_add_trace_rq_remap(void *ignore,
936                                    struct request_queue *q,
937                                    struct request *rq, dev_t dev,
938                                    sector_t from)
939 {
940         struct blk_trace *bt = q->blk_trace;
941         struct blk_io_trace_remap r;
942
943         if (likely(!bt))
944                 return;
945
946         r.device_from = cpu_to_be32(dev);
947         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
948         r.sector_from = cpu_to_be64(from);
949
950         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
951                         rq_data_dir(rq), 0, BLK_TA_REMAP, !!rq->errors,
952                         sizeof(r), &r);
953 }
954
955 /**
956  * blk_add_driver_data - Add binary message with driver-specific data
957  * @q:          queue the io is for
958  * @rq:         io request
959  * @data:       driver-specific data
960  * @len:        length of driver-specific data
961  *
962  * Description:
963  *     Some drivers might want to write driver-specific data per request.
964  *
965  **/
966 void blk_add_driver_data(struct request_queue *q,
967                          struct request *rq,
968                          void *data, size_t len)
969 {
970         struct blk_trace *bt = q->blk_trace;
971
972         if (likely(!bt))
973                 return;
974
975         if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
976                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 0,
977                                 BLK_TA_DRV_DATA, rq->errors, len, data);
978         else
979                 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 0,
980                                 BLK_TA_DRV_DATA, rq->errors, len, data);
981 }
982 EXPORT_SYMBOL_GPL(blk_add_driver_data);
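/*
 * Illustrative sketch: a driver that wants its own per-request payload in the
 * trace stream would call the exported helper above from its issue or
 * completion path, for example (the structure and field names are invented
 * for the example):
 *
 *	struct mydrv_trace_pdu pdu = {
 *		.hw_queue    = hctx_idx,
 *		.latency_us  = lat,
 *	};
 *
 *	blk_add_driver_data(q, rq, &pdu, sizeof(pdu));
 *
 * The payload is copied verbatim after the blk_io_trace header and shows up
 * as a BLK_TA_DRV_DATA event.
 */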
983
984 static void blk_register_tracepoints(void)
985 {
986         int ret;
987
988         ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
989         WARN_ON(ret);
990         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
991         WARN_ON(ret);
992         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
993         WARN_ON(ret);
994         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
995         WARN_ON(ret);
996         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
997         WARN_ON(ret);
998         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
999         WARN_ON(ret);
1000         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1001         WARN_ON(ret);
1002         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1003         WARN_ON(ret);
1004         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1005         WARN_ON(ret);
1006         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1007         WARN_ON(ret);
1008         ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1009         WARN_ON(ret);
1010         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1011         WARN_ON(ret);
1012         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1013         WARN_ON(ret);
1014         ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1015         WARN_ON(ret);
1016         ret = register_trace_block_split(blk_add_trace_split, NULL);
1017         WARN_ON(ret);
1018         ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1019         WARN_ON(ret);
1020         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1021         WARN_ON(ret);
1022 }
1023
1024 static void blk_unregister_tracepoints(void)
1025 {
1026         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1027         unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1028         unregister_trace_block_split(blk_add_trace_split, NULL);
1029         unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1030         unregister_trace_block_plug(blk_add_trace_plug, NULL);
1031         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1032         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1033         unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1034         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1035         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1036         unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1037         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1038         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1039         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1040         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1041         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1042         unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
1043
1044         tracepoint_synchronize_unregister();
1045 }
1046
1047 /*
1048  * struct blk_io_tracer formatting routines
1049  */
1050
1051 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1052 {
1053         int i = 0;
1054         int tc = t->action >> BLK_TC_SHIFT;
1055
1056         if (t->action == BLK_TN_MESSAGE) {
1057                 rwbs[i++] = 'N';
1058                 goto out;
1059         }
1060
1061         if (tc & BLK_TC_FLUSH)
1062                 rwbs[i++] = 'F';
1063
1064         if (tc & BLK_TC_DISCARD)
1065                 rwbs[i++] = 'D';
1066         else if (tc & BLK_TC_WRITE)
1067                 rwbs[i++] = 'W';
1068         else if (t->bytes)
1069                 rwbs[i++] = 'R';
1070         else
1071                 rwbs[i++] = 'N';
1072
1073         if (tc & BLK_TC_FUA)
1074                 rwbs[i++] = 'F';
1075         if (tc & BLK_TC_AHEAD)
1076                 rwbs[i++] = 'A';
1077         if (tc & BLK_TC_SYNC)
1078                 rwbs[i++] = 'S';
1079         if (tc & BLK_TC_META)
1080                 rwbs[i++] = 'M';
1081 out:
1082         rwbs[i] = '\0';
1083 }
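/*
 * Examples of the strings fill_rwbs() produces: a plain read gives "R", a
 * synchronous FUA write gives "WFS", a discard gives "D", and a notify
 * message is always just "N".
 */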
1084
1085 static inline
1086 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1087 {
1088         return (const struct blk_io_trace *)ent;
1089 }
1090
1091 static inline const void *pdu_start(const struct trace_entry *ent)
1092 {
1093         return te_blk_io_trace(ent) + 1;
1094 }
1095
1096 static inline u32 t_action(const struct trace_entry *ent)
1097 {
1098         return te_blk_io_trace(ent)->action;
1099 }
1100
1101 static inline u32 t_bytes(const struct trace_entry *ent)
1102 {
1103         return te_blk_io_trace(ent)->bytes;
1104 }
1105
1106 static inline u32 t_sec(const struct trace_entry *ent)
1107 {
1108         return te_blk_io_trace(ent)->bytes >> 9;
1109 }
1110
1111 static inline unsigned long long t_sector(const struct trace_entry *ent)
1112 {
1113         return te_blk_io_trace(ent)->sector;
1114 }
1115
1116 static inline __u16 t_error(const struct trace_entry *ent)
1117 {
1118         return te_blk_io_trace(ent)->error;
1119 }
1120
1121 static __u64 get_pdu_int(const struct trace_entry *ent)
1122 {
1123         const __u64 *val = pdu_start(ent);
1124         return be64_to_cpu(*val);
1125 }
1126
1127 static void get_pdu_remap(const struct trace_entry *ent,
1128                           struct blk_io_trace_remap *r)
1129 {
1130         const struct blk_io_trace_remap *__r = pdu_start(ent);
1131         __u64 sector_from = __r->sector_from;
1132
1133         r->device_from = be32_to_cpu(__r->device_from);
1134         r->device_to   = be32_to_cpu(__r->device_to);
1135         r->sector_from = be64_to_cpu(sector_from);
1136 }
1137
1138 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1139
1140 static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
1141 {
1142         char rwbs[RWBS_LEN];
1143         unsigned long long ts  = iter->ts;
1144         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1145         unsigned secs          = (unsigned long)ts;
1146         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1147
1148         fill_rwbs(rwbs, t);
1149
1150         trace_seq_printf(&iter->seq,
1151                          "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1152                          MAJOR(t->device), MINOR(t->device), iter->cpu,
1153                          secs, nsec_rem, iter->ent->pid, act, rwbs);
1154 }
1155
1156 static void blk_log_action(struct trace_iterator *iter, const char *act)
1157 {
1158         char rwbs[RWBS_LEN];
1159         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1160
1161         fill_rwbs(rwbs, t);
1162         trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1163                          MAJOR(t->device), MINOR(t->device), act, rwbs);
1164 }
1165
1166 static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
1167 {
1168         const unsigned char *pdu_buf;
1169         int pdu_len;
1170         int i, end;
1171
1172         pdu_buf = pdu_start(ent);
1173         pdu_len = te_blk_io_trace(ent)->pdu_len;
1174
1175         if (!pdu_len)
1176                 return;
1177
1178         /* find the last zero that needs to be printed */
1179         for (end = pdu_len - 1; end >= 0; end--)
1180                 if (pdu_buf[end])
1181                         break;
1182         end++;
1183
1184         trace_seq_putc(s, '(');
1185
1186         for (i = 0; i < pdu_len; i++) {
1187
1188                 trace_seq_printf(s, "%s%02x",
1189                                  i == 0 ? "" : " ", pdu_buf[i]);
1190
1191                 /*
1192                  * stop when the rest is just zeroes and indicate so
1193                  * with a ".." appended
1194                  */
1195                 if (i == end && end != pdu_len - 1) {
1196                         trace_seq_puts(s, " ..) ");
1197                         return;
1198                 }
1199         }
1200
1201         trace_seq_puts(s, ") ");
1202 }
1203
1204 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
1205 {
1206         char cmd[TASK_COMM_LEN];
1207
1208         trace_find_cmdline(ent->pid, cmd);
1209
1210         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1211                 trace_seq_printf(s, "%u ", t_bytes(ent));
1212                 blk_log_dump_pdu(s, ent);
1213                 trace_seq_printf(s, "[%s]\n", cmd);
1214         } else {
1215                 if (t_sec(ent))
1216                         trace_seq_printf(s, "%llu + %u [%s]\n",
1217                                                 t_sector(ent), t_sec(ent), cmd);
1218                 else
1219                         trace_seq_printf(s, "[%s]\n", cmd);
1220         }
1221 }
1222
1223 static void blk_log_with_error(struct trace_seq *s,
1224                               const struct trace_entry *ent)
1225 {
1226         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1227                 blk_log_dump_pdu(s, ent);
1228                 trace_seq_printf(s, "[%d]\n", t_error(ent));
1229         } else {
1230                 if (t_sec(ent))
1231                         trace_seq_printf(s, "%llu + %u [%d]\n",
1232                                          t_sector(ent),
1233                                          t_sec(ent), t_error(ent));
1234                 else
1235                         trace_seq_printf(s, "%llu [%d]\n",
1236                                          t_sector(ent), t_error(ent));
1237         }
1238 }
1239
1240 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
1241 {
1242         struct blk_io_trace_remap r = { .device_from = 0, };
1243
1244         get_pdu_remap(ent, &r);
1245         trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1246                          t_sector(ent), t_sec(ent),
1247                          MAJOR(r.device_from), MINOR(r.device_from),
1248                          (unsigned long long)r.sector_from);
1249 }
1250
1251 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
1252 {
1253         char cmd[TASK_COMM_LEN];
1254
1255         trace_find_cmdline(ent->pid, cmd);
1256
1257         trace_seq_printf(s, "[%s]\n", cmd);
1258 }
1259
1260 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
1261 {
1262         char cmd[TASK_COMM_LEN];
1263
1264         trace_find_cmdline(ent->pid, cmd);
1265
1266         trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
1267 }
1268
1269 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
1270 {
1271         char cmd[TASK_COMM_LEN];
1272
1273         trace_find_cmdline(ent->pid, cmd);
1274
1275         trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1276                          get_pdu_int(ent), cmd);
1277 }
1278
1279 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
1280 {
1281         const struct blk_io_trace *t = te_blk_io_trace(ent);
1282
1283         trace_seq_putmem(s, t + 1, t->pdu_len);
1284         trace_seq_putc(s, '\n');
1285 }
1286
1287 /*
1288  * struct tracer operations
1289  */
1290
1291 static void blk_tracer_print_header(struct seq_file *m)
1292 {
1293         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1294                 return;
1295         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1296                     "#  |     |     |           |   |   |\n");
1297 }
1298
1299 static void blk_tracer_start(struct trace_array *tr)
1300 {
1301         blk_tracer_enabled = true;
1302 }
1303
1304 static int blk_tracer_init(struct trace_array *tr)
1305 {
1306         blk_tr = tr;
1307         blk_tracer_start(tr);
1308         return 0;
1309 }
1310
1311 static void blk_tracer_stop(struct trace_array *tr)
1312 {
1313         blk_tracer_enabled = false;
1314 }
1315
1316 static void blk_tracer_reset(struct trace_array *tr)
1317 {
1318         blk_tracer_stop(tr);
1319 }
1320
1321 static const struct {
1322         const char *act[2];
1323         void       (*print)(struct trace_seq *s, const struct trace_entry *ent);
1324 } what2act[] = {
1325         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1326         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1327         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1328         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1329         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1330         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1331         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1332         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1333         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1334         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1335         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1336         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1337         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1338         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1339         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1340 };
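/*
 * With the classic option set, a queued read ends up rendered roughly as
 * (illustrative values):
 *
 *	  8,0    3     0.000123456  4709  Q   R 1024 + 8 [dd]
 *
 * i.e. blk_log_action_classic() prints the "dev cpu timestamp pid act rwbs"
 * prefix and the per-action handler from what2act[] (here blk_log_generic())
 * appends "sector + sectors [comm]".
 */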
1341
1342 static enum print_line_t print_one_line(struct trace_iterator *iter,
1343                                         bool classic)
1344 {
1345         struct trace_array *tr = iter->tr;
1346         struct trace_seq *s = &iter->seq;
1347         const struct blk_io_trace *t;
1348         u16 what;
1349         bool long_act;
1350         blk_log_action_t *log_action;
1351
1352         t          = te_blk_io_trace(iter->ent);
1353         what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
1354         long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1355         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1356
1357         if (t->action == BLK_TN_MESSAGE) {
1358                 log_action(iter, long_act ? "message" : "m");
1359                 blk_log_msg(s, iter->ent);
1360                 return trace_handle_return(s);
1361         }
1362
1363         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1364                 trace_seq_printf(s, "Unknown action %x\n", what);
1365         else {
1366                 log_action(iter, what2act[what].act[long_act]);
1367                 what2act[what].print(s, iter->ent);
1368         }
1369
1370         return trace_handle_return(s);
1371 }
1372
1373 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1374                                                int flags, struct trace_event *event)
1375 {
1376         return print_one_line(iter, false);
1377 }
1378
1379 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1380 {
1381         struct trace_seq *s = &iter->seq;
1382         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1383         const int offset = offsetof(struct blk_io_trace, sector);
1384         struct blk_io_trace old = {
1385                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1386                 .time     = iter->ts,
1387         };
1388
1389         trace_seq_putmem(s, &old, offset);
1390         trace_seq_putmem(s, &t->sector,
1391                          sizeof(old) - offset + t->pdu_len);
1392 }
1393
1394 static enum print_line_t
1395 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1396                              struct trace_event *event)
1397 {
1398         blk_trace_synthesize_old_trace(iter);
1399
1400         return trace_handle_return(&iter->seq);
1401 }
1402
1403 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1404 {
1405         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1406                 return TRACE_TYPE_UNHANDLED;
1407
1408         return print_one_line(iter, true);
1409 }
1410
1411 static int
1412 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1413 {
1414         /* don't output context-info for blk_classic output */
1415         if (bit == TRACE_BLK_OPT_CLASSIC) {
1416                 if (set)
1417                         tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1418                 else
1419                         tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1420         }
1421         return 0;
1422 }
1423
1424 static struct tracer blk_tracer __read_mostly = {
1425         .name           = "blk",
1426         .init           = blk_tracer_init,
1427         .reset          = blk_tracer_reset,
1428         .start          = blk_tracer_start,
1429         .stop           = blk_tracer_stop,
1430         .print_header   = blk_tracer_print_header,
1431         .print_line     = blk_tracer_print_line,
1432         .flags          = &blk_tracer_flags,
1433         .set_flag       = blk_tracer_set_flag,
1434 };
1435
1436 static struct trace_event_functions trace_blk_event_funcs = {
1437         .trace          = blk_trace_event_print,
1438         .binary         = blk_trace_event_print_binary,
1439 };
1440
1441 static struct trace_event trace_blk_event = {
1442         .type           = TRACE_BLK,
1443         .funcs          = &trace_blk_event_funcs,
1444 };
1445
1446 static int __init init_blk_tracer(void)
1447 {
1448         if (!register_trace_event(&trace_blk_event)) {
1449                 pr_warn("Warning: could not register block events\n");
1450                 return 1;
1451         }
1452
1453         if (register_tracer(&blk_tracer) != 0) {
1454                 pr_warn("Warning: could not register the block tracer\n");
1455                 unregister_trace_event(&trace_blk_event);
1456                 return 1;
1457         }
1458
1459         return 0;
1460 }
1461
1462 device_initcall(init_blk_tracer);
1463
1464 static int blk_trace_remove_queue(struct request_queue *q)
1465 {
1466         struct blk_trace *bt;
1467
1468         bt = xchg(&q->blk_trace, NULL);
1469         if (bt == NULL)
1470                 return -EINVAL;
1471
1472         if (atomic_dec_and_test(&blk_probes_ref))
1473                 blk_unregister_tracepoints();
1474
1475         blk_trace_free(bt);
1476         return 0;
1477 }
1478
1479 /*
1480  * Set up everything required to start tracing
1481  */
1482 static int blk_trace_setup_queue(struct request_queue *q,
1483                                  struct block_device *bdev)
1484 {
1485         struct blk_trace *bt = NULL;
1486         int ret = -ENOMEM;
1487
1488         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1489         if (!bt)
1490                 return -ENOMEM;
1491
1492         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1493         if (!bt->msg_data)
1494                 goto free_bt;
1495
1496         bt->dev = bdev->bd_dev;
1497         bt->act_mask = (u16)-1;
1498
1499         blk_trace_setup_lba(bt, bdev);
1500
1501         ret = -EBUSY;
1502         if (cmpxchg(&q->blk_trace, NULL, bt))
1503                 goto free_bt;
1504
1505         if (atomic_inc_return(&blk_probes_ref) == 1)
1506                 blk_register_tracepoints();
1507         return 0;
1508
1509 free_bt:
1510         blk_trace_free(bt);
1511         return ret;
1512 }
1513
1514 /*
1515  * sysfs interface to enable and configure tracing
1516  */
1517
1518 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1519                                          struct device_attribute *attr,
1520                                          char *buf);
1521 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1522                                           struct device_attribute *attr,
1523                                           const char *buf, size_t count);
1524 #define BLK_TRACE_DEVICE_ATTR(_name) \
1525         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1526                     sysfs_blk_trace_attr_show, \
1527                     sysfs_blk_trace_attr_store)
1528
1529 static BLK_TRACE_DEVICE_ATTR(enable);
1530 static BLK_TRACE_DEVICE_ATTR(act_mask);
1531 static BLK_TRACE_DEVICE_ATTR(pid);
1532 static BLK_TRACE_DEVICE_ATTR(start_lba);
1533 static BLK_TRACE_DEVICE_ATTR(end_lba);
1534
1535 static struct attribute *blk_trace_attrs[] = {
1536         &dev_attr_enable.attr,
1537         &dev_attr_act_mask.attr,
1538         &dev_attr_pid.attr,
1539         &dev_attr_start_lba.attr,
1540         &dev_attr_end_lba.attr,
1541         NULL
1542 };
1543
1544 struct attribute_group blk_trace_attr_group = {
1545         .name  = "trace",
1546         .attrs = blk_trace_attrs,
1547 };
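/*
 * The "trace" group above shows up under the block device's sysfs directory,
 * e.g. /sys/block/<disk>/trace/{enable,act_mask,pid,start_lba,end_lba}, and
 * gives a per-device way to configure tracing without the ioctl interface.
 */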
1548
1549 static const struct {
1550         int mask;
1551         const char *str;
1552 } mask_maps[] = {
1553         { BLK_TC_READ,          "read"          },
1554         { BLK_TC_WRITE,         "write"         },
1555         { BLK_TC_FLUSH,         "flush"         },
1556         { BLK_TC_SYNC,          "sync"          },
1557         { BLK_TC_QUEUE,         "queue"         },
1558         { BLK_TC_REQUEUE,       "requeue"       },
1559         { BLK_TC_ISSUE,         "issue"         },
1560         { BLK_TC_COMPLETE,      "complete"      },
1561         { BLK_TC_FS,            "fs"            },
1562         { BLK_TC_PC,            "pc"            },
1563         { BLK_TC_NOTIFY,        "notify"        },
1564         { BLK_TC_AHEAD,         "ahead"         },
1565         { BLK_TC_META,          "meta"          },
1566         { BLK_TC_DISCARD,       "discard"       },
1567         { BLK_TC_DRV_DATA,      "drv_data"      },
1568         { BLK_TC_FUA,           "fua"           },
1569 };
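/*
 * blk_trace_str2mask() and blk_trace_mask2str() below translate between this
 * table and the textual act_mask form, so e.g. writing "read,write,sync" to
 * the act_mask attribute is equivalent to writing the hex value of
 * (BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC).
 */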
1570
1571 static int blk_trace_str2mask(const char *str)
1572 {
1573         int i;
1574         int mask = 0;
1575         char *buf, *s, *token;
1576
1577         buf = kstrdup(str, GFP_KERNEL);
1578         if (buf == NULL)
1579                 return -ENOMEM;
1580         s = strstrip(buf);
1581
1582         while (1) {
1583                 token = strsep(&s, ",");
1584                 if (token == NULL)
1585                         break;
1586
1587                 if (*token == '\0')
1588                         continue;
1589
1590                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1591                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1592                                 mask |= mask_maps[i].mask;
1593                                 break;
1594                         }
1595                 }
1596                 if (i == ARRAY_SIZE(mask_maps)) {
1597                         mask = -EINVAL;
1598                         break;
1599                 }
1600         }
1601         kfree(buf);
1602
1603         return mask;
1604 }
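/*
 * Example of the parsing above (a sketch; the numeric values are
 * whatever the BLK_TC_* constants expand to in blktrace_api.h):
 *
 *   blk_trace_str2mask("read,write") == BLK_TC_READ | BLK_TC_WRITE
 *   blk_trace_str2mask("bogus")      == -EINVAL
 *
 * Only whitespace around the whole string is stripped, so each token
 * must match a mask_maps[] name exactly (case-insensitively); empty
 * entries such as "read,,write" are skipped.
 */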
1605
1606 static ssize_t blk_trace_mask2str(char *buf, int mask)
1607 {
1608         int i;
1609         char *p = buf;
1610
1611         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1612                 if (mask & mask_maps[i].mask) {
1613                         p += sprintf(p, "%s%s",
1614                                     (p == buf) ? "" : ",", mask_maps[i].str);
1615                 }
1616         }
1617         *p++ = '\n';
1618
1619         return p - buf;
1620 }
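/*
 * blk_trace_mask2str() is the inverse direction used by the _show
 * path below: a mask of BLK_TC_READ | BLK_TC_SYNC is rendered as
 * "read,sync\n", entries appearing in mask_maps[] order.  The
 * returned length includes the trailing newline.
 */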
1621
1622 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1623 {
1624         if (bdev->bd_disk == NULL)
1625                 return NULL;
1626
1627         return bdev_get_queue(bdev);
1628 }
1629
1630 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1631                                          struct device_attribute *attr,
1632                                          char *buf)
1633 {
1634         struct hd_struct *p = dev_to_part(dev);
1635         struct request_queue *q;
1636         struct block_device *bdev;
1637         ssize_t ret = -ENXIO;
1638
1639         bdev = bdget(part_devt(p));
1640         if (bdev == NULL)
1641                 goto out;
1642
1643         q = blk_trace_get_queue(bdev);
1644         if (q == NULL)
1645                 goto out_bdput;
1646
1647         mutex_lock(&bdev->bd_mutex);
1648
1649         if (attr == &dev_attr_enable) {
1650                 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1651                 goto out_unlock_bdev;
1652         }
1653
1654         if (q->blk_trace == NULL)
1655                 ret = sprintf(buf, "disabled\n");
1656         else if (attr == &dev_attr_act_mask)
1657                 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1658         else if (attr == &dev_attr_pid)
1659                 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1660         else if (attr == &dev_attr_start_lba)
1661                 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1662         else if (attr == &dev_attr_end_lba)
1663                 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1664
1665 out_unlock_bdev:
1666         mutex_unlock(&bdev->bd_mutex);
1667 out_bdput:
1668         bdput(bdev);
1669 out:
1670         return ret;
1671 }
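/*
 * Reading the attributes from user space, for example (device names
 * purely illustrative):
 *
 *   $ cat /sys/block/sda/sda1/trace/enable
 *   0
 *   $ cat /sys/block/sda/sda1/trace/act_mask
 *   disabled
 *
 * Every attribute except "enable" reports "disabled" while no
 * blk_trace is attached to the queue.
 */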
1672
1673 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1674                                           struct device_attribute *attr,
1675                                           const char *buf, size_t count)
1676 {
1677         struct block_device *bdev;
1678         struct request_queue *q;
1679         struct hd_struct *p;
1680         u64 value;
1681         ssize_t ret = -EINVAL;
1682
1683         if (count == 0)
1684                 goto out;
1685
1686         if (attr == &dev_attr_act_mask) {
1687                 if (sscanf(buf, "%llx", &value) != 1) {
1688                         /* Assume it is a list of trace category names */
1689                         ret = blk_trace_str2mask(buf);
1690                         if (ret < 0)
1691                                 goto out;
1692                         value = ret;
1693                 }
1694         } else if (sscanf(buf, "%llu", &value) != 1)
1695                 goto out;
1696
1697         ret = -ENXIO;
1698
1699         p = dev_to_part(dev);
1700         bdev = bdget(part_devt(p));
1701         if (bdev == NULL)
1702                 goto out;
1703
1704         q = blk_trace_get_queue(bdev);
1705         if (q == NULL)
1706                 goto out_bdput;
1707
1708         mutex_lock(&bdev->bd_mutex);
1709
1710         if (attr == &dev_attr_enable) {
1711                 if (value)
1712                         ret = blk_trace_setup_queue(q, bdev);
1713                 else
1714                         ret = blk_trace_remove_queue(q);
1715                 goto out_unlock_bdev;
1716         }
1717
1718         ret = 0;
1719         if (q->blk_trace == NULL)
1720                 ret = blk_trace_setup_queue(q, bdev);
1721
1722         if (ret == 0) {
1723                 if (attr == &dev_attr_act_mask)
1724                         q->blk_trace->act_mask = value;
1725                 else if (attr == &dev_attr_pid)
1726                         q->blk_trace->pid = value;
1727                 else if (attr == &dev_attr_start_lba)
1728                         q->blk_trace->start_lba = value;
1729                 else if (attr == &dev_attr_end_lba)
1730                         q->blk_trace->end_lba = value;
1731         }
1732
1733 out_unlock_bdev:
1734         mutex_unlock(&bdev->bd_mutex);
1735 out_bdput:
1736         bdput(bdev);
1737 out:
1738         return ret ? ret : count;
1739 }
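/*
 * A minimal user-space sketch of driving this store path from C
 * rather than the shell.  The device path and helper name are
 * illustrative assumptions, not part of the kernel interface:
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   static int blktrace_attr_write(const char *attr, const char *val)
 *   {
 *           char path[128];
 *           ssize_t n;
 *           int fd;
 *
 *           snprintf(path, sizeof(path),
 *                    "/sys/block/sda/sda1/trace/%s", attr);
 *           fd = open(path, O_WRONLY);
 *           if (fd < 0)
 *                   return -1;
 *           n = write(fd, val, strlen(val));
 *           close(fd);
 *           return n < 0 ? -1 : 0;
 *   }
 *
 *   Usage: blktrace_attr_write("enable", "1");
 *          blktrace_attr_write("act_mask", "read,write");
 */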
1740
1741 int blk_trace_init_sysfs(struct device *dev)
1742 {
1743         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1744 }
1745
1746 void blk_trace_remove_sysfs(struct device *dev)
1747 {
1748         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1749 }
1750
1751 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1752
1753 #ifdef CONFIG_EVENT_TRACING
1754
1755 void blk_dump_cmd(char *buf, struct request *rq)
1756 {
1757         int i, end;
1758         int len = rq->cmd_len;
1759         unsigned char *cmd = rq->cmd;
1760
1761         if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
1762                 buf[0] = '\0';
1763                 return;
1764         }
1765
1766         for (end = len - 1; end >= 0; end--)
1767                 if (cmd[end])
1768                         break;
1769         end++;
1770
1771         for (i = 0; i < len; i++) {
1772                 buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
1773                 if (i == end && end != len - 1) {
1774                         sprintf(buf, " ..");
1775                         break;
1776                 }
1777         }
1778 }
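/*
 * Formatting example for the helper above (CDB contents illustrative):
 * a mostly-zero SYNCHRONIZE CACHE(10) CDB { 0x35, 0x00, ... } has its
 * trailing zero bytes elided and comes out as "35 00 ..", while an
 * INQUIRY CDB { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 } is printed in
 * full as "12 00 00 00 24 00".  Requests that are not BLOCK_PC get an
 * empty string.
 */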
1779
1780 void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
1781 {
1782         int i = 0;
1783
1784         if (rw & REQ_PREFLUSH)
1785                 rwbs[i++] = 'F';
1786
1787         switch (op) {
1788         case REQ_OP_WRITE:
1789         case REQ_OP_WRITE_SAME:
1790                 rwbs[i++] = 'W';
1791                 break;
1792         case REQ_OP_DISCARD:
1793                 rwbs[i++] = 'D';
1794                 break;
1795         case REQ_OP_FLUSH:
1796                 rwbs[i++] = 'F';
1797                 break;
1798         case REQ_OP_READ:
1799                 rwbs[i++] = 'R';
1800                 break;
1801         default:
1802                 rwbs[i++] = 'N';
1803         }
1804
1805         if (rw & REQ_FUA)
1806                 rwbs[i++] = 'F';
1807         if (rw & REQ_RAHEAD)
1808                 rwbs[i++] = 'A';
1809         if (rw & REQ_SYNC)
1810                 rwbs[i++] = 'S';
1811         if (rw & REQ_META)
1812                 rwbs[i++] = 'M';
1813         if (rw & REQ_SECURE)
1814                 rwbs[i++] = 'E';
1815
1816         rwbs[i] = '\0';
1817 }
1818 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
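/*
 * Example outputs of blk_fill_rwbs() (the buffer must hold the worst
 * case of seven flag characters plus the terminating NUL):
 *
 *   REQ_OP_READ,    rw = 0                       => "R"
 *   REQ_OP_WRITE,   rw = REQ_SYNC | REQ_META     => "WSM"
 *   REQ_OP_WRITE,   rw = REQ_PREFLUSH | REQ_FUA  => "FWF"
 *   REQ_OP_DISCARD, rw = 0                       => "D"
 */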
1819
1820 #endif /* CONFIG_EVENT_TRACING */
1821