drivers/staging/greybus/loopback.c (karo-tx-linux.git, blob a62e122c2bccc8b3da23e7c31ce4a7d0e40d7edb)
1 /*
2  * Loopback bridge driver for the Greybus loopback module.
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/kthread.h>
14 #include <linux/delay.h>
15 #include <linux/random.h>
16 #include <linux/sizes.h>
17 #include <linux/cdev.h>
18 #include <linux/fs.h>
19 #include <linux/kfifo.h>
20 #include <linux/debugfs.h>
21 #include <linux/list_sort.h>
22
23 #include <asm/div64.h>
24
25 #include "greybus.h"
26
27 #define NSEC_PER_DAY 86400000000000ULL
28
29 struct gb_loopback_stats {
30         u32 min;
31         u32 max;
32         u64 sum;
33         u32 count;
34 };
35
36 struct gb_loopback_device {
37         struct dentry *root;
38         struct dentry *file;
39         u32 count;
40
41         struct kfifo kfifo;
42         struct mutex mutex;
43         struct list_head list;
44         wait_queue_head_t wq;
45
46         int type;
47         u32 mask;
48         u32 size;
49         u32 iteration_max;
50         u32 iteration_count;
51         size_t size_max;
52         int ms_wait;
53         u32 error;
54
55         struct timeval start;
56         struct timeval end;
57
58         /* Overall stats */
59         struct gb_loopback_stats latency;
60         struct gb_loopback_stats latency_gb;
61         struct gb_loopback_stats throughput;
62         struct gb_loopback_stats requests_per_second;
63 };
64
65 static struct gb_loopback_device gb_dev;
66
67 struct gb_loopback {
68         struct gb_connection *connection;
69
70         struct dentry *file;
71         struct kfifo kfifo_lat;
72         struct kfifo kfifo_ts;
73         struct mutex mutex;
74         struct task_struct *task;
75         struct list_head entry;
76
77         /* Per connection stats */
78         struct gb_loopback_stats latency;
79         struct gb_loopback_stats latency_gb;
80         struct gb_loopback_stats throughput;
81         struct gb_loopback_stats requests_per_second;
82
83         u32 lbid;
84         u32 iteration_count;
85         u64 elapsed_nsecs;
86         u64 elapsed_nsecs_gb;
87         u32 error;
88 };
89
90 #define GB_LOOPBACK_FIFO_DEFAULT                        8192
91
92 static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
93 module_param(kfifo_depth, uint, 0444);
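/*
 * Note (illustrative): kfifo_depth bounds how many per-iteration samples can
 * be logged for later readout; since the parameter is 0444 it can only be
 * set at load time, e.g. "modprobe gb-loopback kfifo_depth=65536" (the
 * module name is an assumption based on the usual gb-* naming).
 */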
94
95 /* Maximum size of any one send data buffer we support */
96 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
97
98 #define GB_LOOPBACK_MS_WAIT_MAX                         1000
99
100 /* interface sysfs attributes */
101 #define gb_loopback_ro_attr(field, pfx, conn)                           \
102 static ssize_t field##_##pfx##_show(struct device *dev,                 \
103                             struct device_attribute *attr,              \
104                             char *buf)                                  \
105 {                                                                       \
106         struct gb_connection *connection;                               \
107         struct gb_loopback *gb;                                         \
108         if (conn) {                                                     \
109                 connection = to_gb_connection(dev);                     \
110                 gb = connection->private;                               \
111                 return sprintf(buf, "%u\n", gb->field);                 \
112         } else {                                                        \
113                 return sprintf(buf, "%u\n", gb_dev.field);              \
114         }                                                               \
115 }                                                                       \
116 static DEVICE_ATTR_RO(field##_##pfx)
117
118 #define gb_loopback_ro_stats_attr(name, field, type, pfx, conn)         \
119 static ssize_t name##_##field##_##pfx##_show(struct device *dev,        \
120                             struct device_attribute *attr,              \
121                             char *buf)                                  \
122 {                                                                       \
123         struct gb_connection *connection;                               \
124         struct gb_loopback *gb;                                         \
125         if (conn) {                                                     \
126                 connection = to_gb_connection(dev);                     \
127                 gb = connection->private;                               \
128                 return sprintf(buf, "%"#type"\n", gb->name.field);      \
129         } else {                                                        \
130                 return sprintf(buf, "%"#type"\n", gb_dev.name.field);   \
131         }                                                               \
132 }                                                                       \
133 static DEVICE_ATTR_RO(name##_##field##_##pfx)
134
135 #define gb_loopback_ro_avg_attr(name, pfx, conn)                        \
136 static ssize_t name##_avg_##pfx##_show(struct device *dev,              \
137                             struct device_attribute *attr,              \
138                             char *buf)                                  \
139 {                                                                       \
140         struct gb_loopback_stats *stats;                                \
141         struct gb_connection *connection;                               \
142         struct gb_loopback *gb;                                         \
143         u64 avg;                                                        \
144         u32 count, rem;                                                 \
145         if (conn) {                                                     \
146                 connection = to_gb_connection(dev);                     \
147                 gb = connection->private;                               \
148                 stats = &gb->name;                                      \
149         } else {                                                        \
150                 stats = &gb_dev.name;                                   \
151         }                                                               \
152         count = stats->count ? stats->count : 1;                        \
153         avg = stats->sum;       /* fractional part is printed below */   \
154         rem = do_div(avg, count);                                       \
155         return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
156 }                                                                       \
157 static DEVICE_ATTR_RO(name##_avg_##pfx)
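/*
 * Worked example for the _avg attribute above (illustrative): with
 * sum = 25 and count = 4, do_div() leaves avg = 6 and rem = 1, so the
 * attribute reads back "6.250000" (1000000 * 1 / 4 = 250000).
 */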
158
159 #define gb_loopback_stats_attrs(field, pfx, conn)                       \
160         gb_loopback_ro_stats_attr(field, min, u, pfx, conn);            \
161         gb_loopback_ro_stats_attr(field, max, u, pfx, conn);            \
162         gb_loopback_ro_avg_attr(field, pfx, conn)
163
164 #define gb_loopback_attr(field, type)                                   \
165 static ssize_t field##_show(struct device *dev,                         \
166                             struct device_attribute *attr,              \
167                             char *buf)                                  \
168 {                                                                       \
169         struct gb_connection *connection = to_gb_connection(dev);       \
170         struct gb_loopback *gb = connection->private;                   \
171         return sprintf(buf, "%"#type"\n", gb->field);                   \
172 }                                                                       \
173 static ssize_t field##_store(struct device *dev,                        \
174                             struct device_attribute *attr,              \
175                             const char *buf,                            \
176                             size_t len)                                 \
177 {                                                                       \
178         int ret;                                                        \
179         struct gb_loopback *gb = to_gb_connection(dev)->private;        \
180         mutex_lock(&gb_dev.mutex);                                      \
181         ret = sscanf(buf, "%"#type, &gb->field);                        \
182         if (ret != 1)                                                   \
183                 len = -EINVAL;                                          \
184         else                                                            \
185                 gb_loopback_check_attr(&gb_dev, gb->connection);        \
186         mutex_unlock(&gb_dev.mutex);                                    \
187         return len;                                                     \
188 }                                                                       \
189 static DEVICE_ATTR_RW(field)
190
191 #define gb_dev_loopback_ro_attr(field, conn)                            \
192 static ssize_t field##_show(struct device *dev,         \
193                             struct device_attribute *attr,              \
194                             char *buf)                                  \
195 {                                                                       \
196         return sprintf(buf, "%u\n", gb_dev.field);                      \
197 }                                                                       \
198 static DEVICE_ATTR_RO(field)
199
200 #define gb_dev_loopback_rw_attr(field, type)                            \
201 static ssize_t field##_show(struct device *dev,                         \
202                             struct device_attribute *attr,              \
203                             char *buf)                                  \
204 {                                                                       \
205         return sprintf(buf, "%"#type"\n", gb_dev.field);                \
206 }                                                                       \
207 static ssize_t field##_store(struct device *dev,                        \
208                             struct device_attribute *attr,              \
209                             const char *buf,                            \
210                             size_t len)                                 \
211 {                                                                       \
212         int ret;                                                        \
213         struct gb_connection *connection = to_gb_connection(dev);       \
214         mutex_lock(&gb_dev.mutex);                                      \
215         ret = sscanf(buf, "%"#type, &gb_dev.field);                     \
216         if (ret != 1)                                                   \
217                 len = -EINVAL;                                          \
218         else                                                            \
219                 gb_loopback_check_attr(&gb_dev, connection);            \
220         mutex_unlock(&gb_dev.mutex);                                    \
221         return len;                                                     \
222 }                                                                       \
223 static DEVICE_ATTR_RW(field)
224
225 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev);
226 static void gb_loopback_check_attr(struct gb_loopback_device *gb_dev,
227                                    struct gb_connection *connection)
228 {
229         struct gb_loopback *gb;
230
231         if (gb_dev->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
232                 gb_dev->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
233         if (gb_dev->size > gb_dev->size_max)
234                 gb_dev->size = gb_dev->size_max;
235         gb_dev->iteration_count = 0;
236         gb_dev->error = 0;
237
238         list_for_each_entry(gb, &gb_dev->list, entry) {
239                 mutex_lock(&gb->mutex);
240                 gb->iteration_count = 0;
241                 gb->error = 0;
242                 if (kfifo_depth < gb_dev->iteration_max) {
243                         dev_warn(&connection->dev,
244                                  "cannot log all %u iterations: kfifo_depth %u too small\n",
245                                  gb_dev->iteration_max, kfifo_depth);
246                 }
247                 kfifo_reset_out(&gb->kfifo_lat);
248                 kfifo_reset_out(&gb->kfifo_ts);
249                 mutex_unlock(&gb->mutex);
250         }
251
252         switch (gb_dev->type) {
253         case GB_LOOPBACK_TYPE_PING:
254         case GB_LOOPBACK_TYPE_TRANSFER:
255         case GB_LOOPBACK_TYPE_SINK:
256                 kfifo_reset_out(&gb_dev->kfifo);
257                 gb_loopback_reset_stats(gb_dev);
258                 wake_up(&gb_dev->wq);
259                 break;
260         default:
261                 gb_dev->type = 0;
262                 break;
263         }
264 }
265
266 /* Time to send and receive one message */
267 gb_loopback_stats_attrs(latency, dev, false);
268 gb_loopback_stats_attrs(latency, con, true);
269 /* Time to send and receive one message not including greybus */
270 gb_loopback_stats_attrs(latency_gb, dev, false);
271 gb_loopback_stats_attrs(latency_gb, con, true);
272 /* Number of requests sent per second on this cport */
273 gb_loopback_stats_attrs(requests_per_second, dev, false);
274 gb_loopback_stats_attrs(requests_per_second, con, true);
275 /* Quantity of data sent and received on this cport */
276 gb_loopback_stats_attrs(throughput, dev, false);
277 gb_loopback_stats_attrs(throughput, con, true);
278 /* Number of errors encountered during loop */
279 gb_loopback_ro_attr(error, dev, false);
280 gb_loopback_ro_attr(error, con, true);
281
282 /*
283  * Type of loopback message to send based on protocol type definitions
284  * 0 => Don't send message
285  * 2 => Send ping message continuously (message without payload)
286  * 3 => Send transfer message continuously (message with payload,
287  *                                         payload returned in response)
288  * 4 => Send a sink message (message with payload, no payload in response)
289  */
290 gb_dev_loopback_rw_attr(type, d);
291 /* Size of transfer message payload: 0-4096 bytes */
292 gb_dev_loopback_rw_attr(size, u);
293 /* Time to wait between two messages: 0-1000 ms */
294 gb_dev_loopback_rw_attr(ms_wait, d);
295 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
296 gb_dev_loopback_rw_attr(iteration_max, u);
297 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
298 gb_dev_loopback_ro_attr(iteration_count, false);
299 /* A bit-mask of destination connections to include in the test run */
300 gb_dev_loopback_rw_attr(mask, u);
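/*
 * A minimal user-space sketch (illustrative only, not part of this driver)
 * of how the knobs above might drive a test run.  The sysfs directory used
 * below is an assumption -- the real path depends on the endo/module/
 * interface topology enumerated on the greybus bus:
 *
 *	#include <stdio.h>
 *
 *	static void set_attr(const char *dir, const char *attr, const char *val)
 *	{
 *		char path[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "%s/%s", dir, attr);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return;
 *		fputs(val, f);
 *		fclose(f);
 *	}
 *
 *	// dir might be "/sys/bus/greybus/devices/endo0:1" (assumed)
 *	set_attr(dir, "size", "128");            // 128-byte transfer payload
 *	set_attr(dir, "ms_wait", "10");          // 10 ms between operations
 *	set_attr(dir, "iteration_max", "1000");  // stop after 1000 iterations
 *	set_attr(dir, "mask", "0");              // 0 == all loopback connections
 *	set_attr(dir, "type", "3");              // 3 == start a transfer test
 */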
301
302 static struct attribute *loopback_dev_attrs[] = {
303         &dev_attr_latency_min_dev.attr,
304         &dev_attr_latency_max_dev.attr,
305         &dev_attr_latency_avg_dev.attr,
306         &dev_attr_latency_gb_min_dev.attr,
307         &dev_attr_latency_gb_max_dev.attr,
308         &dev_attr_latency_gb_avg_dev.attr,
309         &dev_attr_requests_per_second_min_dev.attr,
310         &dev_attr_requests_per_second_max_dev.attr,
311         &dev_attr_requests_per_second_avg_dev.attr,
312         &dev_attr_throughput_min_dev.attr,
313         &dev_attr_throughput_max_dev.attr,
314         &dev_attr_throughput_avg_dev.attr,
315         &dev_attr_type.attr,
316         &dev_attr_size.attr,
317         &dev_attr_ms_wait.attr,
318         &dev_attr_iteration_count.attr,
319         &dev_attr_iteration_max.attr,
320         &dev_attr_mask.attr,
321         &dev_attr_error_dev.attr,
322         NULL,
323 };
324 ATTRIBUTE_GROUPS(loopback_dev);
325
326 static struct attribute *loopback_con_attrs[] = {
327         &dev_attr_latency_min_con.attr,
328         &dev_attr_latency_max_con.attr,
329         &dev_attr_latency_avg_con.attr,
330         &dev_attr_latency_gb_min_con.attr,
331         &dev_attr_latency_gb_max_con.attr,
332         &dev_attr_latency_gb_avg_con.attr,
333         &dev_attr_requests_per_second_min_con.attr,
334         &dev_attr_requests_per_second_max_con.attr,
335         &dev_attr_requests_per_second_avg_con.attr,
336         &dev_attr_throughput_min_con.attr,
337         &dev_attr_throughput_max_con.attr,
338         &dev_attr_throughput_avg_con.attr,
339         &dev_attr_error_con.attr,
340         NULL,
341 };
342 ATTRIBUTE_GROUPS(loopback_con);
343
344 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
345 {
346         u32 lat;
347
348         do_div(elapsed_nsecs, NSEC_PER_USEC);
349         lat = elapsed_nsecs;
350         return lat;
351 }
352
353 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
354 {
355         if (t2 > t1)
356                 return t2 - t1;
357         else
358                 return NSEC_PER_DAY - t1 + t2; /* wrapped past the day boundary */
359 }
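/*
 * Worked example for the rollover branch above (illustrative): with
 * t1 = 86,399,999,000,000 ns (1 ms before the day boundary) and
 * t2 = 500,000 ns just after it, the latency is
 * NSEC_PER_DAY - t1 + t2 = 1,000,000 + 500,000 = 1,500,000 ns (1.5 ms).
 */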
360
361 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
362 {
363         u64 t1, t2;
364
365         t1 = timeval_to_ns(ts);
366         t2 = timeval_to_ns(te);
367
368         return __gb_loopback_calc_latency(t1, t2);
369 }
370
371 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
372                                         struct timeval *ts, struct timeval *te)
373 {
374         kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
375         kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
376 }
377
378 static int gb_loopback_active(struct gb_loopback *gb)
379 {
380         return (gb_dev.mask == 0 || (gb_dev.mask & gb->lbid));
381 }
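/*
 * Illustrative note: gb_loopback_insert_id() below assigns each connection a
 * one-hot lbid (0x1, 0x2, 0x4, ...) in bus-enumeration order, so writing
 * mask=5 selects the first and third loopback connections, while the default
 * mask=0 selects them all.
 */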
382
383 static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
384 {
385         struct timeval ts, te;
386         struct gb_loopback_transfer_request *request;
387         int retval;
388
389         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
390         if (!request)
391                 return -ENOMEM;
392
393         request->len = cpu_to_le32(len);
394
395         do_gettimeofday(&ts);
396         retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_SINK,
397                                    request, len + sizeof(*request), NULL, 0);
398
399         do_gettimeofday(&te);
400
401         /* Calculate the total time the message took */
402         gb_loopback_push_latency_ts(gb, &ts, &te);
403         gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
404
405         /* Calculate non-greybus related component of the latency */
406         gb_connection_pop_timestamp(gb->connection, &ts);
407         gb_connection_pop_timestamp(gb->connection, &te);
408         gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
409
410
411         kfree(request);
412         return retval;
413 }
414
415 static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
416 {
417         struct timeval ts, te;
418         struct gb_loopback_transfer_request *request;
419         struct gb_loopback_transfer_response *response;
420         int retval;
421
422         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
423         if (!request)
424                 return -ENOMEM;
425         response = kmalloc(len + sizeof(*response), GFP_KERNEL);
426         if (!response) {
427                 kfree(request);
428                 return -ENOMEM;
429         }
430
431         memset(request->data, 0x5A, len);
432
433         request->len = cpu_to_le32(len);
434
435         do_gettimeofday(&ts);
436         retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_TRANSFER,
437                                    request, len + sizeof(*request),
438                                    response, len + sizeof(*response));
439         do_gettimeofday(&te);
440
441         /* Calculate the total time the message took */
442         gb_loopback_push_latency_ts(gb, &ts, &te);
443         gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
444
445         /* Calculate non-greybus related component of the latency */
446         gb_connection_pop_timestamp(gb->connection, &ts);
447         gb_connection_pop_timestamp(gb->connection, &te);
448         gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
449
450         if (retval)
451                 goto gb_error;
452
453         if (memcmp(request->data, response->data, len)) {
454                 dev_err(&gb->connection->dev, "Loopback Data doesn't match\n");
455                 retval = -EREMOTEIO;
456         }
457
458 gb_error:
459         kfree(request);
460         kfree(response);
461
462         return retval;
463 }
464
465 static int gb_loopback_ping(struct gb_loopback *gb)
466 {
467         struct timeval ts, te;
468         int retval;
469
470         do_gettimeofday(&ts);
471         retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_PING,
472                                    NULL, 0, NULL, 0);
473         do_gettimeofday(&te);
474
475         /* Calculate the total time the message took */
476         gb_loopback_push_latency_ts(gb, &ts, &te);
477         gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
478
479         /* Calculate non-greybus related component of the latency */
480         gb_connection_pop_timestamp(gb->connection, &ts);
481         gb_connection_pop_timestamp(gb->connection, &te);
482         gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
483
484         return retval;
485 }
486
487 static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
488 {
489         struct gb_connection *connection = operation->connection;
490         struct gb_loopback_transfer_request *request;
491         struct gb_loopback_transfer_response *response;
492         size_t len;
493
494         /* By convention, the AP initiates the version operation */
495         switch (type) {
496         case GB_REQUEST_TYPE_PROTOCOL_VERSION:
497                 dev_err(&connection->dev,
498                         "module-initiated version operation\n");
499                 return -EINVAL;
500         case GB_LOOPBACK_TYPE_PING:
501         case GB_LOOPBACK_TYPE_SINK:
502                 return 0;
503         case GB_LOOPBACK_TYPE_TRANSFER:
504                 if (operation->request->payload_size < sizeof(*request)) {
505                         dev_err(&connection->dev,
506                                 "transfer request too small (%zu < %zu)\n",
507                                 operation->request->payload_size,
508                                 sizeof(*request));
509                         return -EINVAL; /* -EMSGSIZE */
510                 }
511                 request = operation->request->payload;
512                 len = le32_to_cpu(request->len);
513                 if (len > gb_dev.size_max) {
514                         dev_err(&connection->dev,
515                                 "transfer request too large (%zu > %zu)\n",
516                                 len, gb_dev.size_max);
517                         return -EINVAL;
518                 }
519
520                 if (len) {
521                         if (!gb_operation_response_alloc(operation, len,
522                                                          GFP_KERNEL)) {
523                                 dev_err(&connection->dev,
524                                         "error allocating response\n");
525                                 return -ENOMEM;
526                         }
527                         response = operation->response->payload;
528                         response->len = cpu_to_le32(len);
529                         memcpy(response->data, request->data, len);
530                 }
531                 return 0;
532         default:
533                 dev_err(&connection->dev,
534                         "unsupported request: %hhu\n", type);
535                 return -EINVAL;
536         }
537 }
538
539 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev)
540 {
541         struct gb_loopback_stats reset = {
542                 .min = U32_MAX,
543         };
544         struct gb_loopback *gb;
545
546         /* Reset per-connection stats */
547         list_for_each_entry(gb, &gb_dev->list, entry) {
548                 mutex_lock(&gb->mutex);
549                 memcpy(&gb->latency, &reset,
550                        sizeof(struct gb_loopback_stats));
551                 memcpy(&gb->latency_gb, &reset,
552                        sizeof(struct gb_loopback_stats));
553                 memcpy(&gb->throughput, &reset,
554                        sizeof(struct gb_loopback_stats));
555                 memcpy(&gb->requests_per_second, &reset,
556                        sizeof(struct gb_loopback_stats));
557                 mutex_unlock(&gb->mutex);
558         }
559
560         /* Reset aggregate stats */
561         memset(&gb_dev->start, 0, sizeof(struct timeval));
562         memset(&gb_dev->end, 0, sizeof(struct timeval));
563         memcpy(&gb_dev->latency, &reset, sizeof(struct gb_loopback_stats));
564         memcpy(&gb_dev->latency_gb, &reset, sizeof(struct gb_loopback_stats));
565         memcpy(&gb_dev->throughput, &reset, sizeof(struct gb_loopback_stats));
566         memcpy(&gb_dev->requests_per_second, &reset,
567                sizeof(struct gb_loopback_stats));
568 }
569
570 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
571 {
572         if (stats->min > val)
573                 stats->min = val;
574         if (stats->max < val)
575                 stats->max = val;
576         stats->sum += val;
577         stats->count++;
578 }
579
580 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
581 {
582         u64 req = USEC_PER_SEC; /* do_div() needs a 64-bit dividend */
583
584         do_div(req, latency);
585         gb_loopback_update_stats(&gb_dev.requests_per_second, req);
586         gb_loopback_update_stats(&gb->requests_per_second, req);
587 }
588
589 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
590 {
591         u64 throughput;         /* do_div() needs a 64-bit dividend */
592         u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
593
594         switch (gb_dev.type) {
595         case GB_LOOPBACK_TYPE_PING:
596                 break;
597         case GB_LOOPBACK_TYPE_SINK:
598                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
599                                   gb_dev.size;
600                 break;
601         case GB_LOOPBACK_TYPE_TRANSFER:
602                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
603                                   sizeof(struct gb_loopback_transfer_response) +
604                                   gb_dev.size * 2;
605                 break;
606         default:
607                 return;
608         }
609
610         /* Calculate bytes per second */
611         throughput = USEC_PER_SEC;
612         do_div(throughput, latency);
613         throughput *= aggregate_size;
614         gb_loopback_update_stats(&gb_dev.throughput, throughput);
615         gb_loopback_update_stats(&gb->throughput, throughput);
616 }
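/*
 * Worked example (illustrative, assuming an 8-byte gb_operation_msg_hdr and
 * 4-byte transfer request/response headers): for a transfer with
 * gb_dev.size = 100, aggregate_size = 2 * 8 + 4 + 4 + 2 * 100 = 224 bytes;
 * at a latency of 500 us this logs (1000000 / 500) * 224 = 448000 bytes/sec.
 */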
617
618 static int gb_loopback_calculate_aggregate_stats(void)
619 {
620         struct gb_loopback *gb;
621         struct timeval ts;
622         struct timeval te;
623         u64 t1, t2;
624         u64 ts_min;
625         u64 te_max;
626         u64 elapsed_nsecs;
627         u32 lat;
628         int i, latched;
629         int rollover = 0;
630
631         for (i = 0; i < gb_dev.iteration_max; i++) {
632                 latched = 0;
633                 ts_min = 0;
634                 te_max = 0;
635                 list_for_each_entry(gb, &gb_dev.list, entry) {
636                         if (!gb_loopback_active(gb))
637                                 continue;
638                         if (kfifo_out(&gb->kfifo_ts, &ts, sizeof(ts)) < sizeof(ts))
639                                 goto error;
640                         if (kfifo_out(&gb->kfifo_ts, &te, sizeof(te)) < sizeof(te))
641                                 goto error;
642                         t1 = timeval_to_ns(&ts);
643                         t2 = timeval_to_ns(&te);
644
645                         /* minimum timestamp is always what we want */
646                         if (latched == 0 || t1 < ts_min)
647                                 ts_min = t1;
648
649                         /* maximum timestamp needs to handle rollover */
650                         if (t2 > t1) {
651                                 if (latched == 0 || t2 > te_max)
652                                         te_max = t2;
653                         } else {
654                                 if (latched == 0 || rollover == 0)
655                                         te_max = t2;
656                                 if (rollover == 1 && t2 > te_max)
657                                         te_max = t2;
658                                 rollover = 1;
659                         }
660                         latched = 1;
661                 }
662                 /* Calculate the aggregate timestamp */
663                 elapsed_nsecs = __gb_loopback_calc_latency(ts_min, te_max);
664                 lat = gb_loopback_nsec_to_usec_latency(elapsed_nsecs);
665                 kfifo_in(&gb_dev.kfifo, (unsigned char *)&lat, sizeof(lat));
666         }
667         return 0;
668 error:
669         kfifo_reset_out(&gb_dev.kfifo);
670         return -ENOMEM;
671 }
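/*
 * Illustrative example of the aggregation above: for one iteration with two
 * active connections whose (start, end) timestamps are (100 us, 400 us) and
 * (150 us, 500 us), ts_min = 100 us and te_max = 500 us, so the aggregate
 * latency pushed into gb_dev.kfifo is 400 us.
 */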
672
673 static void gb_loopback_calculate_stats(struct gb_loopback *gb)
674 {
675         u32 lat;
676         u64 tmp;
677
678         /* Express latency in terms of microseconds */
679         lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
680
681         /* Log latency statistic */
682         gb_loopback_update_stats(&gb_dev.latency, lat);
683         gb_loopback_update_stats(&gb->latency, lat);
684
685         /* Raw latency log on a per thread basis */
686         kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
687
688         /* Log throughput and requests using latency as benchmark */
689         gb_loopback_throughput_update(gb, lat);
690         gb_loopback_requests_update(gb, lat);
691
692         /* Calculate the greybus related latency number in nanoseconds */
693         tmp = gb->elapsed_nsecs - gb->elapsed_nsecs_gb;
694         lat = tmp;
695         gb_loopback_update_stats(&gb_dev.latency_gb, lat);
696         gb_loopback_update_stats(&gb->latency_gb, lat);
697 }
698
699 static int gb_loopback_fn(void *data)
700 {
701         int error = 0;
702         int ms_wait = 0;
703         int type;
704         u32 size;
705         u32 low_count;
706         struct gb_loopback *gb = data;
707         struct gb_loopback *gb_list;
708
709         while (1) {
710                 if (!gb_dev.type)
711                         wait_event_interruptible(gb_dev.wq, gb_dev.type ||
712                                                  kthread_should_stop());
713                 if (kthread_should_stop())
714                         break;
715
716                 mutex_lock(&gb_dev.mutex);
717                 if (!gb_loopback_active(gb))
718                         goto unlock_continue;
719                 if (gb_dev.iteration_max) {
720                         /* Determine overall lowest count */
721                         low_count = gb->iteration_count;
722                         list_for_each_entry(gb_list, &gb_dev.list, entry) {
723                                 if (!gb_loopback_active(gb_list))
724                                         continue;
725                                 if (gb_list->iteration_count < low_count)
726                                         low_count = gb_list->iteration_count;
727                         }
728                         /* All threads achieved at least low_count iterations */
729                         if (gb_dev.iteration_count < low_count) {
730                                 gb_dev.iteration_count = low_count;
731                                 sysfs_notify(&gb->connection->dev.kobj, NULL,
732                                              "iteration_count");
733                         }
734                         /* Optionally terminate */
735                         if (gb_dev.iteration_count == gb_dev.iteration_max) {
736                                 gb_loopback_calculate_aggregate_stats();
737                                 gb_dev.type = 0;
738                                 goto unlock_continue;
739                         }
740                 }
741                 size = gb_dev.size;
742                 ms_wait = gb_dev.ms_wait;
743                 type = gb_dev.type;
744                 mutex_unlock(&gb_dev.mutex);
745
746                 mutex_lock(&gb->mutex);
747                 if (gb->iteration_count >= gb_dev.iteration_max) {
748                         /* If this thread finished before siblings then sleep */
749                         ms_wait = 1;
750                         mutex_unlock(&gb->mutex);
751                         goto sleep;
752                 }
753                 /* Otherwise perform the requested operation */
754                 if (type == GB_LOOPBACK_TYPE_PING)
755                         error = gb_loopback_ping(gb);
756                 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
757                         error = gb_loopback_transfer(gb, size);
758                 else if (type == GB_LOOPBACK_TYPE_SINK)
759                         error = gb_loopback_sink(gb, size);
760                 mutex_unlock(&gb->mutex);
761
762                 mutex_lock(&gb_dev.mutex);
763                 mutex_lock(&gb->mutex);
764
765                 if (error) {
766                         gb_dev.error++;
767                         gb->error++;
768                 }
769                 gb_loopback_calculate_stats(gb);
770                 gb->iteration_count++;
771
772                 mutex_unlock(&gb->mutex);
773 unlock_continue:
774                 mutex_unlock(&gb_dev.mutex);
775 sleep:
776                 if (ms_wait)
777                         msleep(ms_wait);
778         }
779         return 0;
780 }
781
782 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
783                                                  struct kfifo *kfifo,
784                                                  struct mutex *mutex)
785 {
786         u32 latency;
787         int retval;
788
789         if (kfifo_len(kfifo) == 0) {
790                 retval = -EAGAIN;
791                 goto done;
792         }
793
794         mutex_lock(mutex);
795         retval = kfifo_out(kfifo, &latency, sizeof(latency));
796         if (retval > 0) {
797                 seq_printf(s, "%u", latency);
798                 retval = 0;
799         }
800         mutex_unlock(mutex);
801 done:
802         return retval;
803 }
804
805 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
806 {
807         struct gb_loopback *gb = s->private;
808
809         return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
810                                                      &gb->mutex);
811 }
812
813 static int gb_loopback_latency_open(struct inode *inode, struct file *file)
814 {
815         return single_open(file, gb_loopback_dbgfs_latency_show,
816                            inode->i_private);
817 }
818
819 static const struct file_operations gb_loopback_debugfs_latency_ops = {
820         .open           = gb_loopback_latency_open,
821         .read           = seq_read,
822         .llseek         = seq_lseek,
823         .release        = single_release,
824 };
825
826 static int gb_loopback_dbgfs_dev_latency_show(struct seq_file *s, void *unused)
827 {
828         struct gb_loopback_device *gb_dev = s->private;
829
830         return gb_loopback_dbgfs_latency_show_common(s, &gb_dev->kfifo,
831                                                      &gb_dev->mutex);
832 }
833
834 static int gb_loopback_dev_latency_open(struct inode *inode, struct file *file)
835 {
836         return single_open(file, gb_loopback_dbgfs_dev_latency_show,
837                            inode->i_private);
838 }
839
840 static const struct file_operations gb_loopback_debugfs_dev_latency_ops = {
841         .open           = gb_loopback_dev_latency_open,
842         .read           = seq_read,
843         .llseek         = seq_lseek,
844         .release        = single_release,
845 };
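/*
 * Usage note (derived from the show routines above): each read of a
 * raw_latency_* debugfs file pops one latency sample, in microseconds, from
 * the corresponding kfifo and reports -EAGAIN once the fifo is empty, so a
 * test harness would typically re-open and read the file once per logged
 * iteration after a run completes.
 */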
846
847 static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
848                                       struct list_head *lhb)
849 {
850         struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
851         struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
852         struct gb_connection *ca = a->connection;
853         struct gb_connection *cb = b->connection;
854
855         if (ca->bundle->intf->module->module_id <
856             cb->bundle->intf->module->module_id)
857                 return -1;
858         if (cb->bundle->intf->module->module_id <
859             ca->bundle->intf->module->module_id)
860                 return 1;
861         if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
862                 return -1;
863         if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
864                 return 1;
865         if (ca->bundle->id < cb->bundle->id)
866                 return -1;
867         if (cb->bundle->id < ca->bundle->id)
868                 return 1;
869         if (ca->intf_cport_id < cb->intf_cport_id)
870                 return -1;
871         else if (cb->intf_cport_id < ca->intf_cport_id)
872                 return 1;
873
874         return 0;
875 }
876
877 static void gb_loopback_insert_id(struct gb_loopback *gb)
878 {
879         struct gb_loopback *gb_list;
880         u32 new_lbid = 0;
881
882         /* add to the list, then sort so lbids follow bus enumeration order */
883         list_add_tail(&gb->entry, &gb_dev.list);
884         list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
885         list_for_each_entry(gb_list, &gb_dev.list, entry) {
886                 gb_list->lbid = 1 << new_lbid;
887                 new_lbid++;
888         }
889 }
890
891 #define DEBUGFS_NAMELEN 32
892
893 static int gb_loopback_connection_init(struct gb_connection *connection)
894 {
895         struct gb_loopback *gb;
896         int retval;
897         char name[DEBUGFS_NAMELEN];
898         struct kobject *kobj = &connection->bundle->intf->module->dev.kobj;
899
900         gb = kzalloc(sizeof(*gb), GFP_KERNEL);
901         if (!gb)
902                 return -ENOMEM;
903         gb_loopback_reset_stats(&gb_dev);
904
905         /* If this is the first connection, create the per-module endo0:x entries */
906         mutex_lock(&gb_dev.mutex);
907         if (!gb_dev.count) {
908                 snprintf(name, sizeof(name), "raw_latency_endo0:%d",
909                          connection->bundle->intf->module->module_id);
910                 gb_dev.file = debugfs_create_file(name, S_IFREG | S_IRUGO,
911                                                   gb_dev.root, &gb_dev,
912                                   &gb_loopback_debugfs_dev_latency_ops);
913                 retval = sysfs_create_groups(kobj, loopback_dev_groups);
914                 if (retval)
915                         goto out_sysfs;
916
917                 /* Calculate maximum payload */
918                 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
919                 if (gb_dev.size_max <=
920                         sizeof(struct gb_loopback_transfer_request)) {
921                         retval = -EINVAL;
922                         goto out_sysfs_dev;
923                 }
924                 gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
925         }
926
927         /* Create per-connection sysfs and debugfs data-points */
928         snprintf(name, sizeof(name), "raw_latency_endo0:%d:%d:%d:%d",
929                 connection->bundle->intf->module->module_id,
930                 connection->bundle->intf->interface_id,
931                 connection->bundle->id,
932                 connection->intf_cport_id);
933         gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
934                                        &gb_loopback_debugfs_latency_ops);
935         gb->connection = connection;
936         connection->private = gb;
937         retval = sysfs_create_groups(&connection->dev.kobj,
938                                      loopback_con_groups);
939         if (retval)
940                 goto out_sysfs_dev;
941
942         /* Allocate kfifos */
943         if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
944                           GFP_KERNEL)) {
945                 retval = -ENOMEM;
946                 goto out_sysfs_conn;
947         }
948         if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
949                           GFP_KERNEL)) {
950                 retval = -ENOMEM;
951                 goto out_kfifo0;
952         }
953
954         /* Fork worker thread */
955         mutex_init(&gb->mutex);
956         gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
957         if (IS_ERR(gb->task)) {
958                 retval = PTR_ERR(gb->task);
959                 goto out_kfifo1;
960         }
961
962         gb_loopback_insert_id(gb);
963         gb_dev.count++;
964         mutex_unlock(&gb_dev.mutex);
965         return 0;
966
967 out_kfifo1:
968         kfifo_free(&gb->kfifo_ts);
969 out_kfifo0:
970         kfifo_free(&gb->kfifo_lat);
971 out_sysfs_conn:
972         sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
973 out_sysfs_dev:
974         if (!gb_dev.count) {
975                 sysfs_remove_groups(kobj, loopback_dev_groups);
976                 debugfs_remove(gb_dev.file);
977         }
978         debugfs_remove(gb->file);
979         connection->private = NULL;
980 out_sysfs:
981         mutex_unlock(&gb_dev.mutex);
982         kfree(gb);
983
984         return retval;
985 }
986
987 static void gb_loopback_connection_exit(struct gb_connection *connection)
988 {
989         struct gb_loopback *gb = connection->private;
990         struct kobject *kobj = &connection->bundle->intf->module->dev.kobj;
991
992         if (!IS_ERR_OR_NULL(gb->task))
993                 kthread_stop(gb->task);
994
995         mutex_lock(&gb_dev.mutex);
996
997         connection->private = NULL;
998         kfifo_free(&gb->kfifo_lat);
999         kfifo_free(&gb->kfifo_ts);
1000         gb_dev.count--;
1001         if (!gb_dev.count) {
1002                 sysfs_remove_groups(kobj, loopback_dev_groups);
1003                 debugfs_remove(gb_dev.file);
1004         }
1005         sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
1006         debugfs_remove(gb->file);
1007         list_del(&gb->entry);
1008         mutex_unlock(&gb_dev.mutex);
1009         kfree(gb);
1010 }
1011
1012 static struct gb_protocol loopback_protocol = {
1013         .name                   = "loopback",
1014         .id                     = GREYBUS_PROTOCOL_LOOPBACK,
1015         .major                  = GB_LOOPBACK_VERSION_MAJOR,
1016         .minor                  = GB_LOOPBACK_VERSION_MINOR,
1017         .connection_init        = gb_loopback_connection_init,
1018         .connection_exit        = gb_loopback_connection_exit,
1019         .request_recv           = gb_loopback_request_recv,
1020 };
1021
1022 static int loopback_init(void)
1023 {
1024         int retval;
1025
1026         init_waitqueue_head(&gb_dev.wq);
1027         INIT_LIST_HEAD(&gb_dev.list);
1028         mutex_init(&gb_dev.mutex);
1029         gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
1030
1031         if (kfifo_alloc(&gb_dev.kfifo, kfifo_depth * sizeof(u32), GFP_KERNEL)) {
1032                 retval = -ENOMEM;
1033                 goto error_debugfs;
1034         }
1035
1036         retval = gb_protocol_register(&loopback_protocol);
1037         if (!retval)
1038                 return 0;
1039         kfifo_free(&gb_dev.kfifo);
1040 error_debugfs:
1041         debugfs_remove_recursive(gb_dev.root);
1042         return retval;
1043 }
1044 module_init(loopback_init);
1045
1046 static void __exit loopback_exit(void)
1047 {
1048         debugfs_remove_recursive(gb_dev.root);
1049         kfifo_free(&gb_dev.kfifo);
1050         gb_protocol_deregister(&loopback_protocol);
1051 }
1052 module_exit(loopback_exit);
1053
1054 MODULE_LICENSE("GPL v2");