2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer,
9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without
15 * specific prior written permission.
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/interrupt.h>
38 #include <linux/pagemap.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/version.h>
42 #include <linux/platform_device.h>
43 #include <linux/uaccess.h>
45 #include <asm/pgtable.h>
46 #include <soc/bcm2835/raspberrypi-firmware.h>
48 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
50 #include "vchiq_arm.h"
51 #include "vchiq_connected.h"
52 #include "vchiq_killable.h"
53 #include "vchiq_pagelist.h"
55 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
57 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
58 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
/* Per-VCHIQ-state private data for the 2835 platform layer.
 * NOTE(review): extraction appears to have dropped a member here — an
 * 'inited' flag is read and written elsewhere in this file (see
 * vchiq_platform_init_state / vchiq_platform_get_arm_state); confirm
 * against the full source.
 */
63 typedef struct vchiq_2835_state_struct {
65 VCHIQ_ARM_STATE_T arm_state;
66 } VCHIQ_2835_ARM_STATE_T;
/* Bookkeeping for one bulk-transfer pagelist, carved out of the same
 * coherent allocation as the PAGELIST_T itself (see create_pagelist()).
 * NOTE(review): several members referenced by create_pagelist() —
 * 'pagelist', 'dma_addr' and 'pages' — are not visible in this view of
 * the struct; confirm against the full source.
 */
68 struct vchiq_pagelist_info {
/* Total size of the coherent buffer holding pagelist + bookkeeping */
70 size_t pagelist_buffer_size;
/* DMA_TO_DEVICE for PAGELIST_WRITE, DMA_FROM_DEVICE otherwise */
72 enum dma_data_direction dma_dir;
73 unsigned int num_pages;
/* Non-zero when pages were pinned via get_user_pages() and must be put */
74 unsigned int pages_need_release;
76 struct scatterlist *scatterlist;
/* Non-zero between dma_map_sg() and the matching dma_unmap_sg() */
77 unsigned int scatterlist_mapped;
/* MMIO base of the doorbell block; BELL0 is read in the IRQ handler and
 * BELL2 is written in remote_event_signal(). Mapped in vchiq_platform_init().
 */
80 static void __iomem *g_regs;
81 static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
/* Size of one fragment buffer: two cache lines (head + tail), set in
 * vchiq_platform_init().
 */
82 static unsigned int g_fragments_size;
/* Base of the fragment pool, allocated after the slot memory */
83 static char *g_fragments_base;
/* Head of the intrusive free list threaded through the fragments */
84 static char *g_free_fragments;
/* Counts available fragments; taken in create_pagelist(), released in
 * free_pagelist().
 */
85 static struct semaphore g_free_fragments_sema;
/* Device used for all DMA mapping/allocation in this file */
86 static struct device *g_dev;
88 extern int vchiq_arm_log_level;
/* Protects the g_free_fragments list head */
90 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
/* Doorbell interrupt handler (forward declaration; return type not
 * visible in this view — presumably irqreturn_t, confirm against full
 * source).
 */
93 vchiq_doorbell_irq(int irq, void *dev_id);
/* Pin/describe a user (or vmalloc) buffer for a bulk transfer */
95 static struct vchiq_pagelist_info *
96 create_pagelist(char __user *buf, size_t count, unsigned short type,
97 struct task_struct *task);
/* Tear down a pagelist after the transfer completes (trailing parameters
 * not visible in this view).
 */
100 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
/* Platform probe-time initialisation: allocates the shared slot memory
 * and fragment pool in DMA-coherent memory, initialises the VCHIQ state,
 * maps the doorbell registers, requests the doorbell IRQ and tells the
 * VideoCore firmware where the slots live.
 * Returns 0 on success or a negative errno (several error-return lines
 * are not visible in this extraction).
 */
103 int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
105 struct device *dev = &pdev->dev;
106 struct rpi_firmware *fw = platform_get_drvdata(pdev);
107 VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
108 struct resource *res;
110 dma_addr_t slot_phys;
112 int slot_mem_size, frag_mem_size;
116 * VCHI messages between the CPU and firmware use
117 * 32-bit bus addresses.
119 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/* Optional DT override of the default cache-line size */
124 (void)of_property_read_u32(dev->of_node, "cache-line-size",
/* Each fragment holds one head and one tail partial cache line */
126 g_fragments_size = 2 * g_cache_line_size;
128 /* Allocate space for the channels in coherent memory */
129 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
130 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
/* Slots and fragments share one coherent allocation; fragments follow
 * the slots.
 */
132 slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
133 &slot_phys, GFP_KERNEL);
135 dev_err(dev, "could not allocate DMA memory\n");
139 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
141 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
142 if (!vchiq_slot_zero)
/* Advertise the fragment pool location/count to the firmware via
 * slot zero's platform_data.
 */
145 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
146 (int)slot_phys + slot_mem_size;
147 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
150 g_fragments_base = (char *)slot_mem + slot_mem_size;
151 slot_mem_size += frag_mem_size;
/* Thread the free list through the fragment pool itself: the first
 * bytes of each free fragment point at the next free fragment.
 */
153 g_free_fragments = g_fragments_base;
154 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
155 *(char **)&g_fragments_base[i*g_fragments_size] =
156 &g_fragments_base[(i + 1)*g_fragments_size];
158 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
159 sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
161 if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
165 g_regs = devm_ioremap_resource(&pdev->dev, res);
167 return PTR_ERR(g_regs);
169 irq = platform_get_irq(pdev, 0);
171 dev_err(dev, "failed to get IRQ\n");
175 err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
176 "VCHIQ doorbell", state);
178 dev_err(dev, "failed to register irq=%d\n", irq);
182 /* Send the base address of the slots to VideoCore */
183 channelbase = slot_phys;
184 err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
185 &channelbase, sizeof(channelbase));
/* Firmware zeroes channelbase on success; non-zero means it rejected
 * the address.
 */
186 if (err || channelbase) {
187 dev_err(dev, "failed to set channelbase\n");
188 return err ? : -ENXIO;
192 vchiq_log_info(vchiq_arm_log_level,
193 "vchiq_init - done (slots %pK, phys %pad)",
194 vchiq_slot_zero, &slot_phys);
196 vchiq_call_connected_callbacks();
/* Allocate and initialise the 2835 platform-private state, then hand its
 * embedded arm_state to the common ARM layer. 'inited' is cleared again
 * if vchiq_arm_init_state() fails.
 * NOTE(review): the kzalloc() result is dereferenced without a visible
 * NULL check — confirm against the full source whether one exists in the
 * elided lines.
 */
202 vchiq_platform_init_state(VCHIQ_STATE_T *state)
204 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
205 state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
206 ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
207 status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
208 if (status != VCHIQ_SUCCESS)
210 ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
/* Return the ARM-layer state embedded in the platform-private state.
 * The 'inited' guard trips (elided line, presumably a BUG/WARN) if
 * called before vchiq_platform_init_state() succeeded.
 */
216 vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
218 if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
222 return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
/* Signal an event to the VideoCore: ensure prior writes to shared memory
 * are visible (dsb) before ringing the VC doorbell register.
 */
226 remote_event_signal(REMOTE_EVENT_T *event)
232 dsb(sy); /* data barrier operation */
235 writel(0, g_regs + BELL2); /* trigger vc interrupt */
/* Prepare a bulk transfer: build a pagelist describing the user buffer
 * and stash its DMA address in the bulk descriptor for the firmware.
 * Only VCHI_MEM_HANDLE_INVALID (plain pointers) is supported here.
 * Returns VCHIQ_SUCCESS (the failure return for a NULL pagelistinfo is
 * in elided lines).
 */
239 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
240 void *offset, int size, int dir)
242 struct vchiq_pagelist_info *pagelistinfo;
244 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
246 pagelistinfo = create_pagelist((char __user *)offset, size,
247 (dir == VCHIQ_BULK_RECEIVE)
255 bulk->handle = memhandle;
/* The firmware addresses the buffer by the pagelist's bus address */
256 bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
259 * Store the pagelistinfo address in remote_data,
260 * which isn't used by the slave.
262 bulk->remote_data = pagelistinfo;
264 return VCHIQ_SUCCESS;
/* Tear down the pagelist stashed in remote_data once the transfer has
 * completed (bulk->actual non-zero). Copies back any fragment data and
 * releases pinned pages via free_pagelist().
 */
268 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
270 if (bulk && bulk->remote_data && bulk->actual)
271 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
/* Master-side bulk transfer — not applicable on this (slave) platform;
 * present only so common code links without ifdefs.
 */
276 vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
279 * This should only be called on the master (VideoCore) side, but
280 * provide an implementation to avoid the need for ifdefery.
/* Emit a one-line platform identification into the debug dump.
 * len + 1 includes the terminating NUL in the dumped region.
 */
286 vchiq_dump_platform_state(void *dump_context)
290 len = snprintf(buf, sizeof(buf),
291 " Platform: 2835 (VC master)");
292 vchiq_dump(dump_context, buf, len + 1);
/* --- Power-management stubs ---------------------------------------
 * The 2835 platform does not support VideoCore autosuspend, so the
 * functions below are no-ops / constant returns. Several bodies are
 * elided in this extraction.
 */
296 vchiq_platform_suspend(VCHIQ_STATE_T *state)
302 vchiq_platform_resume(VCHIQ_STATE_T *state)
304 return VCHIQ_SUCCESS;
308 vchiq_platform_paused(VCHIQ_STATE_T *state)
313 vchiq_platform_resumed(VCHIQ_STATE_T *state)
318 vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
320 return 1; // autosuspend not supported - videocore always wanted
324 vchiq_platform_use_suspend_timer(void)
329 vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
331 vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
334 vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
/* Doorbell interrupt handler. Reading BELL0 also clears the doorbell;
 * bit 2 (0x4) indicates the VideoCore rang us, in which case all remote
 * events are polled. The IRQ_HANDLED assignment and final return are in
 * elided lines.
 */
343 vchiq_doorbell_irq(int irq, void *dev_id)
345 VCHIQ_STATE_T *state = dev_id;
346 irqreturn_t ret = IRQ_NONE;
349 /* Read (and clear) the doorbell */
350 status = readl(g_regs + BELL0);
352 if (status & 0x4) { /* Was the doorbell rung? */
353 remote_event_pollall(state);
/* Release everything a pagelistinfo owns: undo the scatterlist DMA
 * mapping if still mapped, drop references on pages pinned with
 * get_user_pages(), and free the coherent buffer that holds the
 * pagelist, bookkeeping and scatterlist.
 * (Name is a long-standing typo for "cleanup"; renaming would touch
 * call sites outside this view.)
 */
361 cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
363 if (pagelistinfo->scatterlist_mapped) {
364 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
365 pagelistinfo->num_pages, pagelistinfo->dma_dir);
368 if (pagelistinfo->pages_need_release) {
371 for (i = 0; i < pagelistinfo->num_pages; i++)
372 put_page(pagelistinfo->pages[i]);
/* One allocation covers pagelist + addrs + pages + scatterlist + info */
375 dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
376 pagelistinfo->pagelist, pagelistinfo->dma_addr);
379 /* There is a potential problem with partial cache lines (pages?)
380 ** at the ends of the block when reading. If the CPU accessed anything in
381 ** the same line (page?) then it may have pulled old data into the cache,
382 ** obscuring the new data underneath. We can solve this by transferring the
383 ** partial cache lines separately, and allowing the ARM to copy into the
/* Build a PAGELIST_T describing 'count' bytes at user (or vmalloc)
 * address 'buf' for a bulk transfer of the given 'type'
 * (PAGELIST_READ/PAGELIST_WRITE). Pins the pages, maps them for DMA,
 * coalesces contiguous runs into firmware-format (addr | npages-1)
 * entries, and reserves a fragment buffer when a read starts or ends on
 * a partial cache line. Returns the bookkeeping struct, or NULL on
 * failure (several error-path lines are elided in this extraction).
 */
387 static struct vchiq_pagelist_info *
388 create_pagelist(char __user *buf, size_t count, unsigned short type,
389 struct task_struct *task)
391 PAGELIST_T *pagelist;
392 struct vchiq_pagelist_info *pagelistinfo;
395 unsigned int num_pages, offset, i, k;
397 size_t pagelist_size;
398 struct scatterlist *scatterlist, *sg;
/* Offset of the buffer within its first page */
402 offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
403 num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
/* One allocation holds, in order: the pagelist header + addrs array,
 * the page-pointer array, the scatterlist, and the pagelistinfo.
 */
405 pagelist_size = sizeof(PAGELIST_T) +
406 (num_pages * sizeof(u32)) +
407 (num_pages * sizeof(pages[0]) +
408 (num_pages * sizeof(struct scatterlist))) +
409 sizeof(struct vchiq_pagelist_info);
411 /* Allocate enough storage to hold the page pointers and the page
414 pagelist = dma_zalloc_coherent(g_dev,
419 vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
/* Carve the sub-arrays out of the single coherent allocation */
424 addrs = pagelist->addrs;
425 pages = (struct page **)(addrs + num_pages);
426 scatterlist = (struct scatterlist *)(pages + num_pages);
427 pagelistinfo = (struct vchiq_pagelist_info *)
428 (scatterlist + num_pages);
430 pagelist->length = count;
431 pagelist->type = type;
432 pagelist->offset = offset;
434 /* Populate the fields of the pagelistinfo structure */
435 pagelistinfo->pagelist = pagelist;
436 pagelistinfo->pagelist_buffer_size = pagelist_size;
437 pagelistinfo->dma_addr = dma_addr;
438 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
439 DMA_TO_DEVICE : DMA_FROM_DEVICE;
440 pagelistinfo->num_pages = num_pages;
441 pagelistinfo->pages_need_release = 0;
442 pagelistinfo->pages = pages;
443 pagelistinfo->scatterlist = scatterlist;
444 pagelistinfo->scatterlist_mapped = 0;
/* vmalloc buffers: translate page-by-page, nothing to pin */
446 if (is_vmalloc_addr(buf)) {
447 unsigned long length = count;
448 unsigned int off = offset;
450 for (actual_pages = 0; actual_pages < num_pages;
452 struct page *pg = vmalloc_to_page(buf + (actual_pages *
454 size_t bytes = PAGE_SIZE - off;
458 pages[actual_pages] = pg;
462 /* do not try and release vmalloc pages */
/* User buffers: pin the pages under mmap_sem; writable pin only for
 * reads (device writes into the buffer).
 */
464 down_read(&task->mm->mmap_sem);
465 actual_pages = get_user_pages(
466 (unsigned long)buf & ~(PAGE_SIZE - 1),
468 (type == PAGELIST_READ) ? FOLL_WRITE : 0,
471 up_read(&task->mm->mmap_sem);
473 if (actual_pages != num_pages) {
474 vchiq_log_info(vchiq_arm_log_level,
475 "create_pagelist - only %d/%d pages locked",
479 /* This is probably due to the process being killed */
480 while (actual_pages > 0)
483 put_page(pages[actual_pages]);
485 cleaup_pagelistinfo(pagelistinfo);
488 /* release user pages */
489 pagelistinfo->pages_need_release = 1;
493 * Initialize the scatterlist so that the magic cookie
494 * is filled if debugging is enabled
496 sg_init_table(scatterlist, num_pages);
497 /* Now set the pages for each scatterlist */
498 for (i = 0; i < num_pages; i++)
499 sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
501 dma_buffers = dma_map_sg(g_dev,
504 pagelistinfo->dma_dir);
506 if (dma_buffers == 0) {
507 cleaup_pagelistinfo(pagelistinfo);
511 pagelistinfo->scatterlist_mapped = 1;
513 /* Combine adjacent blocks for performance */
515 for_each_sg(scatterlist, sg, dma_buffers, i) {
516 u32 len = sg_dma_len(sg);
517 u32 addr = sg_dma_address(sg);
519 /* Note: addrs is the address + page_count - 1
520 * The firmware expects the block to be page
521 * aligned and a multiple of the page size
524 WARN_ON(len & ~PAGE_MASK);
525 WARN_ON(addr & ~PAGE_MASK);
/* Extend the previous entry if this segment is contiguous with it
 * (elided condition), else start a new (addr | npages-1) entry.
 */
527 ((addrs[k - 1] & PAGE_MASK) |
528 ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
530 addrs[k - 1] += (len >> PAGE_SHIFT);
532 addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
536 /* Partial cache lines (fragments) require special measures */
537 if ((type == PAGELIST_READ) &&
538 ((pagelist->offset & (g_cache_line_size - 1)) ||
539 ((pagelist->offset + pagelist->length) &
540 (g_cache_line_size - 1)))) {
/* May sleep waiting for a free fragment; interruptible so a killed
 * process unwinds cleanly.
 */
543 if (down_interruptible(&g_free_fragments_sema) != 0) {
544 cleaup_pagelistinfo(pagelistinfo);
548 WARN_ON(g_free_fragments == NULL);
/* Pop a fragment off the intrusive free list */
550 down(&g_free_fragments_mutex);
551 fragments = g_free_fragments;
552 WARN_ON(fragments == NULL);
553 g_free_fragments = *(char **) g_free_fragments;
554 up(&g_free_fragments_mutex);
/* Encode the fragment index into the pagelist type for the firmware */
555 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
556 (fragments - g_fragments_base) / g_fragments_size;
/* Complete and release a bulk-transfer pagelist: unmap the DMA
 * scatterlist, copy any head/tail fragment bytes written by the firmware
 * back into the caller's pages, return the fragment to the free list,
 * dirty the pages on a read, and free the bookkeeping via
 * cleaup_pagelistinfo(). 'actual' is the number of bytes actually
 * transferred (signature continuation elided in this extraction).
 */
563 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
567 PAGELIST_T *pagelist = pagelistinfo->pagelist;
568 struct page **pages = pagelistinfo->pages;
569 unsigned int num_pages = pagelistinfo->num_pages;
571 vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
572 pagelistinfo->pagelist, actual);
575 * NOTE: dma_unmap_sg must be called before the
576 * cpu can touch any of the data/pages.
578 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
579 pagelistinfo->num_pages, pagelistinfo->dma_dir);
580 pagelistinfo->scatterlist_mapped = 0;
582 /* Deal with any partial cache lines (fragments) */
583 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
/* Recover the fragment buffer from the index encoded in 'type' */
584 char *fragments = g_fragments_base +
585 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
587 int head_bytes, tail_bytes;
588 head_bytes = (g_cache_line_size - pagelist->offset) &
589 (g_cache_line_size - 1);
590 tail_bytes = (pagelist->offset + actual) &
591 (g_cache_line_size - 1);
/* Copy the partial leading cache line into the first page */
593 if ((actual >= 0) && (head_bytes != 0)) {
594 if (head_bytes > actual)
597 memcpy((char *)page_address(pages[0]) +
/* Copy the partial trailing cache line into the last page */
602 if ((actual >= 0) && (head_bytes < actual) &&
604 memcpy((char *)page_address(pages[num_pages - 1]) +
605 ((pagelist->offset + actual) &
606 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
607 fragments + g_cache_line_size,
/* Push the fragment back on the free list and wake a waiter */
611 down(&g_free_fragments_mutex);
612 *(char **)fragments = g_free_fragments;
613 g_free_fragments = fragments;
614 up(&g_free_fragments_mutex);
615 up(&g_free_fragments_sema);
618 /* Need to mark all the pages dirty. */
619 if (pagelist->type != PAGELIST_WRITE &&
620 pagelistinfo->pages_need_release) {
621 for (i = 0; i < num_pages; i++)
622 set_page_dirty(pages[i]);
625 cleaup_pagelistinfo(pagelistinfo);