/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* Prefix every pr_*() message emitted from this file. */
#define pr_fmt(fmt) "ACPI: AML: " fmt
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/wait.h>
18 #include <linux/poll.h>
19 #include <linux/sched.h>
20 #include <linux/kthread.h>
21 #include <linux/proc_fs.h>
22 #include <linux/debugfs.h>
23 #include <linux/circ_buf.h>
24 #include <linux/acpi.h>
/* Alignment and size of each direction's circular buffer. */
#define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE PAGE_SIZE

/*
 * Shorthands around the <linux/circ_buf.h> helpers, bound to the fixed
 * power-of-two ACPI_AML_BUF_SIZE.
 */
#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))

/* Interface state bits kept in acpi_aml_io.flags (under .lock). */
#define ACPI_AML_OPENED 0x0001	/* interface opened by a reader */
#define ACPI_AML_CLOSED 0x0002	/* interface closing/draining */
#define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
#define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
	/* Single wait queue shared by readers, writers and the thread. */
	wait_queue_head_t wait;
	/* kthread running the ACPICA debugger command loop, NULL if none. */
	struct task_struct *thread;
	/* Log direction: kernel writes out_crc, user space reads it. */
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	/* Command direction: user space writes in_crc, kernel reads it. */
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	/* ACPICA callback executed in the debugger thread context. */
	acpi_osd_exec_callback function;
/* Singleton state: only one debugger interface instance exists. */
static struct acpi_aml_io acpi_aml_io;
/* True once acpi_aml_init() has fully registered the debugger. */
static bool acpi_aml_initialized;
/* The one open file allowed to drive the debugger as reader. */
static struct file *acpi_aml_active_reader;
/* debugfs "acpidbg" entry backing this interface. */
static struct dentry *acpi_aml_dentry;
/* True while the debugger kthread exists; caller holds acpi_aml_io.lock. */
static inline bool __acpi_aml_running(void)
	return acpi_aml_io.thread ? true : false;
/*
 * May the buffers be accessed for the role(s) in @flag?
 * Caller holds acpi_aml_io.lock.
 */
static inline bool __acpi_aml_access_ok(unsigned long flag)
	/*
	 * The debugger interface is in opened state (OPENED && !CLOSED),
	 * then it is allowed to access the debugger buffers from either
	 * user space or the kernel space.
	 * In addition, for the kernel space, only the debugger thread
	 * (thread ID matched) is allowed to access.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
/*
 * Reading from @circ is possible: no reader currently holds @flag and
 * the buffer is non-empty. Caller holds acpi_aml_io.lock.
 */
static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
	/*
	 * Another read is not in progress and there is data in buffer
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
/*
 * Writing to @circ is possible: no writer currently holds @flag and
 * free space remains. Caller holds acpi_aml_io.lock.
 */
static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
	/*
	 * Another write is not in progress and there is buffer space
	 * available for write.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
/* Any user/kernel read or write in flight? Caller holds acpi_aml_io.lock. */
static inline bool __acpi_aml_busy(void)
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
/* Opened or still draining after close? Caller holds acpi_aml_io.lock. */
static inline bool __acpi_aml_opened(void)
	if (acpi_aml_io.flags & ACPI_AML_OPEN)
/* Debugger callback usage count non-zero? Caller holds acpi_aml_io.lock. */
static inline bool __acpi_aml_used(void)
	return acpi_aml_io.usages ? true : false;
/* Locked wrapper around __acpi_aml_running(). */
static inline bool acpi_aml_running(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
/* Locked wrapper around __acpi_aml_busy(). */
static bool acpi_aml_busy(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
/* Locked wrapper around __acpi_aml_used(). */
static bool acpi_aml_used(void)
	/*
	 * The usage count is prepared to avoid race conditions between the
	 * starts and the stops of the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Wait condition for the kernel-side command reader: true when access
 * is no longer allowed (waiter must bail out) or input data is ready.
 */
static bool acpi_aml_kern_readable(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Wait condition for the kernel-side log writer: true when access is
 * no longer allowed (waiter must bail out) or output space is free.
 */
static bool acpi_aml_kern_writable(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Wait/poll condition for the user-space log reader: true when access
 * is no longer allowed (waiter must bail out) or log data is ready.
 */
static bool acpi_aml_user_readable(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Wait/poll condition for the user-space command writer: true when
 * access is no longer allowed (waiter must bail out) or space is free.
 */
static bool acpi_aml_user_writable(void)
	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Claim the writer role @flag on @circ. On success @flag is left set
 * and must later be dropped via acpi_aml_unlock_fifo(). Fails when
 * access is not permitted or no buffer space is available.
 */
static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
	if (!__acpi_aml_writable(circ, flag)) {
	acpi_aml_io.flags |= flag;
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Claim the reader role @flag on @circ. On success @flag is left set
 * and must later be dropped via acpi_aml_unlock_fifo(). Fails when
 * access is not permitted or no data is available.
 */
static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
	if (!__acpi_aml_readable(circ, flag)) {
	acpi_aml_io.flags |= flag;
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Release role @flag; when @wakeup is requested, wake waiters blocked
 * on the shared wait queue so they can re-test their conditions.
 */
static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Push up to @len bytes of debugger log output from @buf into out_crc
 * for user space to read. Runs on the kernel (debugger thread) side.
 * The "sync" comments mark memory-barrier points around the circular
 * buffer head/tail updates.
 */
static int acpi_aml_write_kern(const char *buf, int len)
	struct circ_buf *crc = &acpi_aml_io.out_crc;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	/* sync tail before inserting logs */
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	/* sync head after inserting logs */
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
/*
 * Pop a single command byte from in_crc on the kernel (debugger
 * thread) side. Returns the byte, or a negative errno.
 */
static int acpi_aml_readb_kern(void)
	struct circ_buf *crc = &acpi_aml_io.in_crc;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	/* sync head before removing cmds */
	p = &crc->buf[crc->tail];
	/* sync tail before inserting cmds */
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
static ssize_t acpi_aml_write_log(const char *msg)
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		/* Loop until the whole message has been buffered. */
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			/* Buffer full: block until the reader drains it. */
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true again.
			 */
	return size > 0 ? size : ret;
/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @size: the size of the debugger input
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Return the size of stored commands or errno.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
	/*
	 * This is ensured by the running fact of the debugger thread
	 * unless a bug is introduced.
	 */
	BUG_ON(!acpi_aml_initialized);
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			/* No command bytes yet: block until some arrive. */
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true again.
			 */
		*(msg + size) = (char)ret;
	/*
	 * acpi_os_get_line() requires a zero terminated command, so
	 * overwrite the trailing byte with NUL.
	 */
	*(msg + size - 1) = '\0';
	return size > 0 ? size : ret;
/*
 * Body of the debugger kthread: snapshot the registered callback and
 * context under the lock (bumping the usage count), run it, then drop
 * the usage count, clear .thread when the last user is gone and wake
 * anyone waiting for thread termination.
 */
static int acpi_aml_thread(void *unsed)
	acpi_osd_exec_callback function = NULL;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	mutex_unlock(&acpi_aml_io.lock);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
	struct task_struct *t;

	/* Publish the callback/context for acpi_aml_thread() to pick up. */
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
		pr_err("Failed to create AML debugger thread.\n");

	/* Record the thread and tell ACPICA which thread is the debugger. */
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	mutex_unlock(&acpi_aml_io.lock);
/*
 * ACPICA hook: print the appropriate prompt (execute prompt when
 * single-stepping) and block until one command line has been read.
 */
static int acpi_aml_wait_command_ready(bool single_step,
				       char *buffer, size_t length)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
	status = acpi_os_get_line(buffer, length, NULL);
	if (ACPI_FAILURE(status))
/* ACPICA hook invoked after each debugger command completes. */
static int acpi_aml_notify_command_complete(void)
/*
 * open() handler for the debugfs node. The first reader becomes the
 * "active reader" and triggers acpi_initialize_debugger(); writers are
 * only admitted once the interface has been opened by that reader.
 */
static int acpi_aml_open(struct inode *inode, struct file *file)
	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed, no new user is allowed
	 * during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * session.
		 */
		if (acpi_aml_active_reader) {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		/*
		 * No writer is allowed unless the debugger thread is
		 * already running.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
		pr_debug("Debugger thread initialized.\n");

		/* Reset both FIFOs and mark the interface as opened. */
		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	/* Error path: drop the active-reader claim if we made it. */
	if (acpi_aml_active_reader == file)
		acpi_aml_active_reader = NULL;
	mutex_unlock(&acpi_aml_io.lock);
/*
 * release() handler. When the active reader closes, the interface is
 * marked CLOSED, all blocked readers/writers are woken and drained,
 * and the debugger thread is terminated before the interface can be
 * reopened.
 */
static int acpi_aml_release(struct inode *inode, struct file *file)
	mutex_lock(&acpi_aml_io.lock);
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait all user space/kernel space readers/writers to
		 * stop so that ACPICA command loop of the debugger thread
		 * should fail all its command line reads after this point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then we try to terminate the debugger thread if it is
		 * still running.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	if (acpi_aml_io.users == 0) {
		/* Last user gone: allow the interface to be reopened. */
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	mutex_unlock(&acpi_aml_io.lock);
/*
 * Copy up to @len bytes of buffered log output from out_crc to the
 * user buffer @buf. Only wakes waiters on success (ret >= 0).
 */
static int acpi_aml_read_user(char __user *buf, int len)
	struct circ_buf *crc = &acpi_aml_io.out_crc;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	/* sync head before removing logs */
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
	/* sync tail after removing logs */
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
/*
 * read() handler: drain log output to user space, blocking (unless
 * O_NONBLOCK) until data arrives or the debugger stops running.
 */
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
	if (!access_ok(VERIFY_WRITE, buf, count))
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				/* Blocking read: wait for log data. */
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_readable());
				/*
				 * We need to retry when the condition
				 * becomes true again.
				 */
			if (!acpi_aml_running())
	return size > 0 ? size : ret;
/*
 * Copy up to @len command bytes from the user buffer @buf into in_crc
 * for the debugger thread to consume. Only wakes waiters on success.
 */
static int acpi_aml_write_user(const char __user *buf, int len)
	struct circ_buf *crc = &acpi_aml_io.in_crc;

	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
	/* sync tail before inserting cmds */
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	if (copy_from_user(p, buf, n)) {
	/* sync head after inserting cmds */
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
/*
 * write() handler: feed debugger commands from user space, blocking
 * (unless O_NONBLOCK) until buffer space frees up or the debugger
 * stops running.
 */
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
	if (!access_ok(VERIFY_READ, buf, count))
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				/* Blocking write: wait for free space. */
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true again.
				 */
			if (!acpi_aml_running())
	return size > 0 ? size : ret;
/*
 * poll() handler: readable when log output is buffered, writable when
 * command space is free (both also report ready on access failure so
 * pollers can observe shutdown).
 */
static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= POLLIN | POLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= POLLOUT | POLLWRNORM;
/* File operations backing the "acpidbg" debugfs entry. */
static const struct file_operations acpi_aml_operations = {
	.read = acpi_aml_read,
	.write = acpi_aml_write,
	.poll = acpi_aml_poll,
	.open = acpi_aml_open,
	.release = acpi_aml_release,
	.llseek = generic_file_llseek,
/* Debugger operations registered with the ACPI core / ACPICA. */
static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread = acpi_aml_create_thread,
	.read_cmd = acpi_aml_read_cmd,
	.write_log = acpi_aml_write_log,
	.wait_command_ready = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
/*
 * Module init: set up the IO state, create the "acpidbg" debugfs file
 * and register this driver as the ACPI debugger backend. On
 * registration failure the debugfs entry is removed again.
 */
int __init acpi_aml_init(void)
	/* The ACPI debugfs root must already exist. */
	if (!acpi_debugfs_dir) {

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);
	if (acpi_aml_dentry == NULL) {
	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	acpi_aml_initialized = true;
	/* Error path: tear the debugfs entry back down. */
	debugfs_remove(acpi_aml_dentry);
	acpi_aml_dentry = NULL;
/*
 * Module exit: unregister the debugger backend and remove the debugfs
 * entry, undoing acpi_aml_init() in reverse order.
 */
void __exit acpi_aml_exit(void)
	if (acpi_aml_initialized) {
		acpi_unregister_debugger(&acpi_aml_debugger);
		if (acpi_aml_dentry) {
			debugfs_remove(acpi_aml_dentry);
			acpi_aml_dentry = NULL;
		acpi_aml_initialized = false;
/* Module entry/exit hooks and metadata. */
module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");