1 /****************************************************************************
3 * Copyright (C) 2005 - 2013 by Vivante Corp.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the license, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *****************************************************************************/
22 #include "gc_hal_kernel_precomp.h"
24 #define _GC_OBJ_ZONE gcvZONE_VIDMEM
26 /******************************************************************************\
27 ******************************* Private Functions ******************************
28 \******************************************************************************/
30 /*******************************************************************************
34 ** Split a node on the required byte boundary.
39 ** Pointer to an gckOS object.
41 ** gcuVIDMEM_NODE_PTR Node
42 ** Pointer to the node to split.
45 ** Number of bytes to keep in the node.
54 ** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
61 IN gcuVIDMEM_NODE_PTR Node,
/* _Split: split a video-memory node at 'Bytes', keeping the first 'Bytes'
** in Node and moving the remainder into a freshly allocated node that is
** linked immediately after Node in both the node list and the free list.
** NOTE(review): several interior lines of this function are elided in this
** view; comments below describe only the statements that are visible. */
65 gcuVIDMEM_NODE_PTR node;
66 gctPOINTER pointer = gcvNULL;
68 /* Make sure the byte boundary makes sense. */
69 if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
74 /* Allocate a new gcuVIDMEM_NODE object. */
75 if (gcmIS_ERROR(gckOS_Allocate(Os,
76 gcmSIZEOF(gcuVIDMEM_NODE),
85 /* Initialize gcuVIDMEM_NODE structure. */
/* New node covers [offset + Bytes, offset + original byte count). */
86 node->VidMem.offset = Node->VidMem.offset + Bytes;
87 node->VidMem.bytes = Node->VidMem.bytes - Bytes;
88 node->VidMem.alignment = 0;
89 node->VidMem.locked = 0;
90 node->VidMem.memory = Node->VidMem.memory;
91 node->VidMem.pool = Node->VidMem.pool;
92 node->VidMem.physical = Node->VidMem.physical;
94 #if gcdUSE_VIDMEM_PER_PID
/* Per-PID heaps carry concrete physical/logical addresses; advance both
** by the split size so the new node addresses its own half. */
95 gcmkASSERT(Node->VidMem.physical != 0);
96 gcmkASSERT(Node->VidMem.logical != gcvNULL);
97 node->VidMem.processID = Node->VidMem.processID;
98 node->VidMem.physical = Node->VidMem.physical + Bytes;
99 node->VidMem.logical = Node->VidMem.logical + Bytes;
101 node->VidMem.processID = 0;
102 node->VidMem.logical = gcvNULL;
106 /* Insert node behind specified node. */
107 node->VidMem.next = Node->VidMem.next;
108 node->VidMem.prev = Node;
109 Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
111 /* Insert free node behind specified node. */
112 node->VidMem.nextFree = Node->VidMem.nextFree;
113 node->VidMem.prevFree = Node;
114 Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
116 /* Adjust size of specified node. */
117 Node->VidMem.bytes = Bytes;
123 /*******************************************************************************
127 ** Merge two adjacent nodes together.
132 ** Pointer to an gckOS object.
134 ** gcuVIDMEM_NODE_PTR Node
135 ** Pointer to the first of the two nodes to merge.
145 IN gcuVIDMEM_NODE_PTR Node
/* _Merge: coalesce Node with the node that immediately follows it in the
** node list, absorbing its byte count and freeing its descriptor.
** NOTE(review): interior lines are elided in this view. */
148 gcuVIDMEM_NODE_PTR node;
151 /* Save pointer to next node. */
152 node = Node->VidMem.next;
153 #if gcdUSE_VIDMEM_PER_PID
154 /* Check if the nodes are adjacent physically. */
155 if ( ((Node->VidMem.physical + Node->VidMem.bytes) != node->VidMem.physical) ||
156 ((Node->VidMem.logical + Node->VidMem.bytes) != node->VidMem.logical) )
163 /* This is a good time to make sure the heap is not corrupted. */
/* Adjacent nodes must tile the heap exactly: end of Node == start of node. */
164 if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
166 /* Corrupted heap. */
168 Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
169 return gcvSTATUS_HEAP_CORRUPTED;
173 /* Adjust byte count. */
174 Node->VidMem.bytes += node->VidMem.bytes;
176 /* Unlink next node from linked list. */
177 Node->VidMem.next = node->VidMem.next;
178 Node->VidMem.nextFree = node->VidMem.nextFree;
180 Node->VidMem.next->VidMem.prev =
181 Node->VidMem.nextFree->VidMem.prevFree = Node;
183 /* Free next node. */
184 status = gcmkOS_SAFE_FREE(Os, node);
188 /******************************************************************************\
189 ******************************* gckVIDMEM API Code ******************************
190 \******************************************************************************/
192 /*******************************************************************************
194 ** gckVIDMEM_ConstructVirtual
196 ** Construct a new gcuVIDMEM_NODE union for virtual memory.
201 ** Pointer to an gckKERNEL object.
204 ** Number of byte to allocate.
208 ** gcuVIDMEM_NODE_PTR * Node
209 ** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
212 gckVIDMEM_ConstructVirtual(
214 IN gctBOOL Contiguous,
216 OUT gcuVIDMEM_NODE_PTR * Node
/* Construct a gcuVIDMEM_NODE union describing paged (virtual) memory:
** allocate the descriptor, initialize per-GPU lock/page-table state, create
** the node mutex, allocate the paged memory, and register it with the MMU.
** NOTE(review): interior lines are elided in this view. */
221 gcuVIDMEM_NODE_PTR node = gcvNULL;
222 gctPOINTER pointer = gcvNULL;
225 gcmkHEADER_ARG("Kernel=0x%x Contiguous=%d Bytes=%lu", Kernel, Contiguous, Bytes);
227 /* Verify the arguments. */
228 gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
229 gcmkVERIFY_ARGUMENT(Bytes > 0);
230 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
232 /* Extract the gckOS object pointer. */
234 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
236 /* Allocate an gcuVIDMEM_NODE union. */
237 gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
241 /* Initialize gcuVIDMEM_NODE union for virtual memory. */
242 node->Virtual.kernel = Kernel;
243 node->Virtual.contiguous = Contiguous;
244 node->Virtual.logical = gcvNULL;
/* Per-GPU state: no locks held, no page tables, no owning kernels yet. */
246 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
248 node->Virtual.lockeds[i] = 0;
249 node->Virtual.pageTables[i] = gcvNULL;
250 node->Virtual.lockKernels[i] = gcvNULL;
253 node->Virtual.mutex = gcvNULL;
255 gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
258 node->Virtual.next = gcvNULL;
259 node->Virtual.freePending = gcvFALSE;
260 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
262 node->Virtual.unlockPendings[i] = gcvFALSE;
266 node->Virtual.freed = gcvFALSE;
268 gcmkONERROR(gckOS_ZeroMemory(&node->Virtual.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
270 /* Create the mutex. */
272 gckOS_CreateMutex(os, &node->Virtual.mutex));
274 /* Allocate the virtual memory. */
/* Note: bytes is recorded on the node as a side effect of the call below. */
276 gckOS_AllocatePagedMemoryEx(os,
277 node->Virtual.contiguous,
278 node->Virtual.bytes = Bytes,
279 &node->Virtual.physical));
/* VG cores do not use the system MMU node list. */
284 if (Kernel->core != gcvCORE_VG)
287 gckMMU_InsertNode(Kernel->mmu, node);
291 /* Return pointer to the gcuVIDMEM_NODE union. */
294 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
295 "Created virtual node 0x%x for %u bytes @ 0x%x",
296 node, Bytes, node->Virtual.physical);
299 gcmkFOOTER_ARG("*Node=0x%x", *Node);
/* Error path: roll back whatever was created before the failure. */
306 if (node->Virtual.mutex != gcvNULL)
308 /* Destroy the mutex. */
309 gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
312 /* Free the structure. */
313 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
316 /* Return the status. */
321 /*******************************************************************************
323 ** gckVIDMEM_DestroyVirtual
325 ** Destroy an gcuVIDMEM_NODE union for virtual memory.
329 ** gcuVIDMEM_NODE_PTR Node
330 ** Pointer to a gcuVIDMEM_NODE union.
337 gckVIDMEM_DestroyVirtual(
338 IN gcuVIDMEM_NODE_PTR Node
/* Destroy a virtual-memory gcuVIDMEM_NODE: remove it from the MMU (non-VG),
** delete its mutex, free any per-GPU page tables, and free the descriptor.
** NOTE(review): interior lines are elided in this view. */
344 gcmkHEADER_ARG("Node=0x%x", Node);
346 /* Verify the arguments. */
347 gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
349 /* Extract the gckOS object pointer. */
350 os = Node->Virtual.kernel->os;
351 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
356 if (Node->Virtual.kernel->core != gcvCORE_VG)
360 gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));
364 /* Delete the mutex. */
365 gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex));
/* Release any page tables still mapped for this node on each GPU,
** using the MMU of the kernel that performed the lock. */
367 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
369 if (Node->Virtual.pageTables[i] != gcvNULL)
374 /* Free the pages. */
375 gcmkVERIFY_OK(gckVGMMU_FreePages(Node->Virtual.lockKernels[i]->vg->mmu,
376 Node->Virtual.pageTables[i],
377 Node->Virtual.pageCount));
382 /* Free the pages. */
383 gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.lockKernels[i]->mmu,
384 Node->Virtual.pageTables[i],
385 Node->Virtual.pageCount));
390 /* Delete the gcuVIDMEM_NODE union. */
391 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
398 /*******************************************************************************
400 ** gckVIDMEM_Construct
402 ** Construct a new gckVIDMEM object.
407 ** Pointer to an gckOS object.
409 ** gctUINT32 BaseAddress
410 ** Base address for the video memory heap.
413 ** Number of bytes in the video memory heap.
415 ** gctSIZE_T Threshold
416 ** Minimum number of bytes beyond an allocation before the node is
417 ** split. Can be used as a minimum alignment requirement.
419 ** gctSIZE_T BankSize
420 ** Number of bytes per physical memory bank. Used by bank
425 ** gckVIDMEM * Memory
426 ** Pointer to a variable that will hold the pointer to the gckVIDMEM
432 IN gctUINT32 BaseAddress,
434 IN gctSIZE_T Threshold,
435 IN gctSIZE_T BankSize,
436 OUT gckVIDMEM * Memory
/* gckVIDMEM_Construct: build a video-memory heap object. The heap is carved
** into banks of BankSize bytes, each headed by a sentinel node; surface
** types are then mapped to preferred banks for bank optimization.
** NOTE(review): interior lines are elided in this view. */
439 gckVIDMEM memory = gcvNULL;
441 gcuVIDMEM_NODE_PTR node;
443 gctPOINTER pointer = gcvNULL;
445 gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
447 Os, BaseAddress, Bytes, Threshold, BankSize);
449 /* Verify the arguments. */
450 gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
451 gcmkVERIFY_ARGUMENT(Bytes > 0);
452 gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
454 /* Allocate the gckVIDMEM object. */
455 gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
459 /* Initialize the gckVIDMEM object. */
460 memory->object.type = gcvOBJ_VIDMEM;
463 /* Set video memory heap information. */
464 memory->baseAddress = BaseAddress;
465 memory->bytes = Bytes;
466 memory->freeBytes = Bytes;
467 memory->threshold = Threshold;
468 memory->mutex = gcvNULL;
469 #if gcdUSE_VIDMEM_PER_PID
470 gcmkONERROR(gckOS_GetProcessID(&memory->pid));
475 /* Walk all possible banks. */
476 for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
482 /* Use all bytes for the first bank. */
487 /* Compute number of bytes for this bank. */
/* Align up to the next bank boundary to find this bank's extent. */
488 bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;
492 /* Make sure we don't exceed the total number of bytes. */
499 /* Mark heap as not used: a NULL-linked sentinel denotes an empty bank. */
500 memory->sentinel[i].VidMem.next =
501 memory->sentinel[i].VidMem.prev =
502 memory->sentinel[i].VidMem.nextFree =
503 memory->sentinel[i].VidMem.prevFree = gcvNULL;
507 /* Allocate one gcuVIDMEM_NODE union. */
508 gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
512 /* Initialize gcuVIDMEM_NODE union. */
/* The bank starts as one big free node covering its entire range. */
513 node->VidMem.memory = memory;
517 node->VidMem.nextFree =
518 node->VidMem.prevFree = &memory->sentinel[i];
520 node->VidMem.offset = BaseAddress;
521 node->VidMem.bytes = bytes;
522 node->VidMem.alignment = 0;
523 node->VidMem.physical = 0;
524 node->VidMem.pool = gcvPOOL_UNKNOWN;
526 node->VidMem.locked = 0;
528 gcmkONERROR(gckOS_ZeroMemory(&node->VidMem.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
531 #if gcdUSE_VIDMEM_PER_PID
532 node->VidMem.processID = memory->pid;
533 node->VidMem.physical = memory->baseAddress + BaseAddress;
534 gcmkONERROR(gckOS_GetLogicalAddressProcess(Os,
535 node->VidMem.processID,
536 node->VidMem.physical,
537 &node->VidMem.logical));
539 node->VidMem.processID = 0;
540 node->VidMem.logical = gcvNULL;
544 /* Initialize the linked list of nodes. */
545 memory->sentinel[i].VidMem.next =
546 memory->sentinel[i].VidMem.prev =
547 memory->sentinel[i].VidMem.nextFree =
548 memory->sentinel[i].VidMem.prevFree = node;
/* Sentinels carry zero bytes so list walks can terminate on bytes == 0. */
551 memory->sentinel[i].VidMem.bytes = 0;
553 /* Adjust address for next bank. */
554 BaseAddress += bytes;
559 /* Assign all the bank mappings. */
/* Hand out banks from the top down; once only one bank remains, all
** remaining surface types share it. */
560 memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
561 memory->mapping[gcvSURF_BITMAP] = banks - 1;
562 if (banks > 1) --banks;
563 memory->mapping[gcvSURF_DEPTH] = banks - 1;
564 memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
565 if (banks > 1) --banks;
566 memory->mapping[gcvSURF_TEXTURE] = banks - 1;
567 if (banks > 1) --banks;
568 memory->mapping[gcvSURF_VERTEX] = banks - 1;
569 if (banks > 1) --banks;
570 memory->mapping[gcvSURF_INDEX] = banks - 1;
571 if (banks > 1) --banks;
572 memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
573 if (banks > 1) --banks;
574 memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
577 memory->mapping[gcvSURF_IMAGE] = 0;
578 memory->mapping[gcvSURF_MASK] = 0;
579 memory->mapping[gcvSURF_SCISSOR] = 0;
582 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
583 "[GALCORE] INDEX: bank %d",
584 memory->mapping[gcvSURF_INDEX]);
585 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
586 "[GALCORE] VERTEX: bank %d",
587 memory->mapping[gcvSURF_VERTEX]);
588 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
589 "[GALCORE] TEXTURE: bank %d",
590 memory->mapping[gcvSURF_TEXTURE]);
591 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
592 "[GALCORE] RENDER_TARGET: bank %d",
593 memory->mapping[gcvSURF_RENDER_TARGET]);
594 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
595 "[GALCORE] DEPTH: bank %d",
596 memory->mapping[gcvSURF_DEPTH]);
597 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
598 "[GALCORE] TILE_STATUS: bank %d",
599 memory->mapping[gcvSURF_TILE_STATUS]);
601 /* Allocate the mutex. */
602 gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
604 /* Return pointer to the gckVIDMEM object. */
608 gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
/* Error path: tear down the mutex, any initialized bank nodes, and the
** heap object itself. */
613 if (memory != gcvNULL)
615 if (memory->mutex != gcvNULL)
617 /* Delete the mutex. */
618 gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
621 for (i = 0; i < banks; ++i)
624 gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
625 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
628 /* Free the object. */
629 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
632 /* Return the status. */
637 /*******************************************************************************
641 ** Destroy an gckVIDMEM object.
646 ** Pointer to an gckVIDMEM object to destroy.
657 gcuVIDMEM_NODE_PTR node, next;
/* gckVIDMEM_Destroy: free every node in every used bank, delete the heap
** mutex, and release the gckVIDMEM object itself.
** NOTE(review): interior lines are elided in this view. */
660 gcmkHEADER_ARG("Memory=0x%x", Memory);
662 /* Verify the arguments. */
663 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
665 /* Walk all sentinels. */
666 for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
668 /* Bail out if the heap is not used. */
669 if (Memory->sentinel[i].VidMem.next == gcvNULL)
674 /* Walk all the nodes until we reach the sentinel. */
/* Sentinel nodes carry bytes == 0, which terminates the walk. */
675 for (node = Memory->sentinel[i].VidMem.next;
676 node->VidMem.bytes != 0;
679 /* Save pointer to the next node. */
680 next = node->VidMem.next;
683 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
687 /* Free the mutex. */
688 gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
690 /* Mark the object as unknown. */
691 Memory->object.type = gcvOBJ_UNKNOWN;
693 /* Free the gckVIDMEM object. */
694 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
701 /*******************************************************************************
703 ** gckVIDMEM_Allocate
705 ** Allocate rectangular memory from the gckVIDMEM object.
710 ** Pointer to an gckVIDMEM object.
713 ** Width of rectangle to allocate. Make sure the width is properly
717 ** Height of rectangle to allocate. Make sure the height is properly
721 ** Depth of rectangle to allocate. This equals to the number of
722 ** rectangles to allocate contiguously (i.e., for cubic maps and volume
725 ** gctUINT BytesPerPixel
726 ** Number of bytes per pixel.
728 ** gctUINT32 Alignment
729 ** Byte alignment for allocation.
732 ** Type of surface to allocate (use by bank optimization).
736 ** gcuVIDMEM_NODE_PTR * Node
737 ** Pointer to a variable that will hold the allocated memory node.
745 IN gctUINT BytesPerPixel,
746 IN gctUINT32 Alignment,
747 IN gceSURF_TYPE Type,
748 OUT gcuVIDMEM_NODE_PTR * Node
/* gckVIDMEM_Allocate: allocate a rectangular surface by computing its
** linear size and delegating to gckVIDMEM_AllocateLinear.
** NOTE(review): interior lines are elided in this view. */
754 gcmkHEADER_ARG("Memory=0x%x Width=%u Height=%u Depth=%u BytesPerPixel=%u "
755 "Alignment=%u Type=%d",
756 Memory, Width, Height, Depth, BytesPerPixel, Alignment,
759 /* Verify the arguments. */
760 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
761 gcmkVERIFY_ARGUMENT(Width > 0);
762 gcmkVERIFY_ARGUMENT(Height > 0);
763 gcmkVERIFY_ARGUMENT(Depth > 0);
764 gcmkVERIFY_ARGUMENT(BytesPerPixel > 0);
765 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
767 /* Compute linear size. */
/* NOTE(review): this multiplication can overflow for very large surface
** dimensions; the visible code performs no overflow check — confirm that
** callers bound Width/Height/Depth, or add a check before multiplying. */
768 bytes = Width * Height * Depth * BytesPerPixel;
770 /* Allocate through linear function. */
772 gckVIDMEM_AllocateLinear(Memory, bytes, Alignment, Type, Node));
775 gcmkFOOTER_ARG("*Node=0x%x", *Node);
779 /* Return the status. */
784 #if gcdENABLE_BANK_ALIGNMENT
786 #if !gcdBANK_BIT_START
787 #error gcdBANK_BIT_START not defined.
791 #error gcdBANK_BIT_END not defined.
793 /*******************************************************************************
794 ** _GetSurfaceBankAlignment
796 ** Return the offset alignment required to make the BaseAddress
802 ** Pointer to gcoOS object.
805 ** Type of allocation.
807 ** gctUINT32 BaseAddress
808 ** Base address of current video memory node.
812 ** gctUINT32_PTR AlignmentOffset
813 ** Pointer to a variable that will hold the number of bytes to skip in
814 ** the current video memory node in order to make the alignment bank
818 _GetSurfaceBankAlignment(
819 IN gceSURF_TYPE Type,
820 IN gctUINT32 BaseAddress,
821 OUT gctUINT32_PTR AlignmentOffset
/* Compute the byte offset needed so that BaseAddress lands on the bank
** preferred for this surface type (bank 0 for render targets, bank 2 for
** the other handled case), using gcdBANK_BIT_START/END to locate the bank
** bits. NOTE(review): interior lines are elided in this view. */
825 /* To retrieve the bank. */
826 static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
827 ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
829 /* To retrieve the bank and all the lower bytes. */
830 static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
832 gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
834 /* Verify the arguments. */
835 gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
839 case gcvSURF_RENDER_TARGET:
840 bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
842 /* Align to the first bank. */
/* Already on bank 0 -> no offset; otherwise advance to the next bank-0
** boundary (one full bank period minus the low bits). */
843 *AlignmentOffset = (bank == 0) ?
845 ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
849 bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
851 /* Align to the third bank. */
852 *AlignmentOffset = (bank == 2) ?
854 ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
856 /* Add a channel offset at the channel bit. */
857 *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
861 /* no alignment needed. */
862 *AlignmentOffset = 0;
865 /* Return the status. */
866 gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
871 static gcuVIDMEM_NODE_PTR
/* _FindNode: scan the free list of the given bank for the first node large
** enough to hold Bytes plus any padding needed for alignment (and, when
** bank alignment is enabled, bank placement). On success *Alignment is
** rewritten to the total number of bytes to skip inside the node.
** NOTE(review): interior lines are elided in this view. */
876 IN gceSURF_TYPE Type,
877 IN OUT gctUINT32_PTR Alignment
880 gcuVIDMEM_NODE_PTR node;
883 #if gcdENABLE_BANK_ALIGNMENT
884 gctUINT32 bankAlignment;
888 if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
890 /* No free nodes left. */
894 #if gcdENABLE_BANK_ALIGNMENT
895 /* Walk all free nodes until we have one that is big enough or we have
896 ** reached the sentinel. */
897 for (node = Memory->sentinel[Bank].VidMem.nextFree;
898 node->VidMem.bytes != 0;
899 node = node->VidMem.nextFree)
901 gcmkONERROR(_GetSurfaceBankAlignment(
903 node->VidMem.memory->baseAddress + node->VidMem.offset,
/* Round the bank offset up so it also satisfies the caller's alignment. */
906 bankAlignment = gcmALIGN(bankAlignment, *Alignment);
908 /* Compute number of bytes to skip for alignment. */
909 alignment = (*Alignment == 0)
911 : (*Alignment - (node->VidMem.offset % *Alignment));
913 if (alignment == *Alignment)
915 /* Node is already aligned. */
919 if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
921 /* This node is big enough. */
922 *Alignment = alignment + bankAlignment;
928 /* Walk all free nodes until we have one that is big enough or we have
929 reached the sentinel. */
930 for (node = Memory->sentinel[Bank].VidMem.nextFree;
931 node->VidMem.bytes != 0;
932 node = node->VidMem.nextFree)
935 gctINT modulo = gckMATH_ModuloInt(node->VidMem.offset, *Alignment);
937 /* Compute number of bytes to skip for alignment. */
938 alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
940 if (alignment == *Alignment)
942 /* Node is already aligned. */
946 if (node->VidMem.bytes >= Bytes + alignment)
948 /* This node is big enough. */
949 *Alignment = alignment;
954 #if gcdENABLE_BANK_ALIGNMENT
957 /* Not enough memory. */
961 /*******************************************************************************
963 ** gckVIDMEM_AllocateLinear
965 ** Allocate linear memory from the gckVIDMEM object.
970 ** Pointer to an gckVIDMEM object.
973 ** Number of bytes to allocate.
975 ** gctUINT32 Alignment
976 ** Byte alignment for allocation.
979 ** Type of surface to allocate (use by bank optimization).
983 ** gcuVIDMEM_NODE_PTR * Node
984 ** Pointer to a variable that will hold the allocated memory node.
987 gckVIDMEM_AllocateLinear(
990 IN gctUINT32 Alignment,
991 IN gceSURF_TYPE Type,
992 OUT gcuVIDMEM_NODE_PTR * Node
/* Allocate Bytes of linear video memory: pick the preferred bank for Type,
** search banks for a fitting free node (falling back to lower then upper
** banks), split the node to honor alignment and threshold, and unlink it
** from the free list. The heap mutex is held for the whole operation.
** NOTE(review): interior lines are elided in this view. */
996 gcuVIDMEM_NODE_PTR node;
999 gctBOOL acquired = gcvFALSE;
1001 gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
1002 Memory, Bytes, Alignment, Type);
1004 /* Verify the arguments. */
1005 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
1006 gcmkVERIFY_ARGUMENT(Bytes > 0);
1007 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
1008 gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
1010 /* Acquire the mutex. */
1011 gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
1014 #if !gcdUSE_VIDMEM_PER_PID
/* Fast reject: not even the whole heap has this much free space. */
1016 if (Bytes > Memory->freeBytes)
1018 /* Not enough memory. */
1019 status = gcvSTATUS_OUT_OF_MEMORY;
1024 #if gcdSMALL_BLOCK_SIZE
/* When free space falls below 1/gcdRATIO_FOR_SMALL_MEMORY of the heap,
** reserve the remainder for small allocations only. */
1025 if ((Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
1026 && (Bytes >= gcdSMALL_BLOCK_SIZE)
1029 /* The left memory is for small memory.*/
1030 status = gcvSTATUS_OUT_OF_MEMORY;
1035 /* Find the default bank for this surface type. */
1036 gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
1037 bank = Memory->mapping[Type];
1038 alignment = Alignment;
1040 #if gcdUSE_VIDMEM_PER_PID
1041 if (Bytes <= Memory->freeBytes)
1044 /* Find a free node in the default bank. */
1045 node = _FindNode(Memory, bank, Bytes, Type, &alignment);
1047 /* Out of memory? */
1048 if (node == gcvNULL)
1050 /* Walk all lower banks. */
1051 for (i = bank - 1; i >= 0; --i)
1053 /* Find a free node inside the current bank. */
1054 node = _FindNode(Memory, i, Bytes, Type, &alignment);
1055 if (node != gcvNULL)
1062 if (node == gcvNULL)
1064 /* Walk all upper banks. */
1065 for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
1067 if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
1069 /* Abort when we reach unused banks. */
1073 /* Find a free node inside the current bank. */
1074 node = _FindNode(Memory, i, Bytes, Type, &alignment);
1075 if (node != gcvNULL)
1081 #if gcdUSE_VIDMEM_PER_PID
1085 if (node == gcvNULL)
1087 /* Out of memory. */
1088 #if gcdUSE_VIDMEM_PER_PID
1089 /* Allocate more memory from shared pool. */
1091 gctPHYS_ADDR physical_temp;
1095 bytes = gcmALIGN(Bytes, gcdUSE_VIDMEM_PER_PID_SIZE);
1097 gcmkONERROR(gckOS_AllocateContiguous(Memory->os,
1103 /* physical address is returned as 0 for user space. workaround. */
1104 if (physical_temp == gcvNULL)
1106 gcmkONERROR(gckOS_GetPhysicalAddress(Memory->os, logical, &physical));
1109 /* Allocate one gcuVIDMEM_NODE union. */
1111 gckOS_Allocate(Memory->os,
1112 gcmSIZEOF(gcuVIDMEM_NODE),
1113 (gctPOINTER *) &node));
1115 /* Initialize gcuVIDMEM_NODE union. */
1116 node->VidMem.memory = Memory;
1118 node->VidMem.offset = 0;
1119 node->VidMem.bytes = bytes;
1120 node->VidMem.alignment = 0;
1121 node->VidMem.physical = physical;
1122 node->VidMem.pool = gcvPOOL_UNKNOWN;
1124 node->VidMem.locked = 0;
1127 gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
1128 node->VidMem.logical = logical;
1129 gcmkASSERT(logical != gcvNULL);
1132 /* Insert node behind sentinel node. */
1133 node->VidMem.next = Memory->sentinel[bank].VidMem.next;
1134 node->VidMem.prev = &Memory->sentinel[bank];
1135 Memory->sentinel[bank].VidMem.next = node->VidMem.next->VidMem.prev = node;
1137 /* Insert free node behind sentinel node. */
1138 node->VidMem.nextFree = Memory->sentinel[bank].VidMem.nextFree;
1139 node->VidMem.prevFree = &Memory->sentinel[bank];
1140 Memory->sentinel[bank].VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
1142 Memory->freeBytes += bytes;
1144 status = gcvSTATUS_OUT_OF_MEMORY;
1149 /* Do we have an alignment? */
1152 /* Split the node so it is aligned. */
/* _FindNode rewrote 'alignment' to the skip size; splitting there leaves
** the second half aligned, and we allocate from that half. */
1153 if (_Split(Memory->os, node, alignment))
1155 /* Successful split, move to aligned node. */
1156 node = node->VidMem.next;
1158 /* Remove alignment. */
1163 /* Do we have enough memory after the allocation to split it? */
1164 if (node->VidMem.bytes - Bytes > Memory->threshold)
1166 /* Adjust the node size. */
1167 _Split(Memory->os, node, Bytes);
1170 /* Remove the node from the free list. */
1171 node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
1172 node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
1173 node->VidMem.nextFree =
1174 node->VidMem.prevFree = gcvNULL;
1176 /* Fill in the information. */
1177 node->VidMem.alignment = alignment;
1178 node->VidMem.memory = Memory;
1180 #if !gcdUSE_VIDMEM_PER_PID
1181 node->VidMem.logical = gcvNULL;
1182 gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
1184 gcmkASSERT(node->VidMem.logical != gcvNULL);
1188 /* Adjust the number of free bytes. */
1189 Memory->freeBytes -= node->VidMem.bytes;
1191 node->VidMem.freePending = gcvFALSE;
1193 #if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
1194 node->VidMem.kernelVirtual = gcvNULL;
1197 /* Release the mutex. */
1198 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1200 /* Return the pointer to the node. */
1203 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1204 "Allocated %u bytes @ 0x%x [0x%08X]",
1205 node->VidMem.bytes, node, node->VidMem.offset);
1208 gcmkFOOTER_ARG("*Node=0x%x", *Node);
1209 return gcvSTATUS_OK;
/* Error path: drop the heap mutex before returning. */
1214 /* Release the mutex. */
1215 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1218 /* Return the status. */
1223 /*******************************************************************************
1227 ** Free an allocated video memory node.
1231 ** gcuVIDMEM_NODE_PTR Node
1232 ** Pointer to a gcuVIDMEM_NODE object.
1240 IN gcuVIDMEM_NODE_PTR Node
/* gckVIDMEM_Free: free a video-memory node. For heap (VidMem) nodes the
** node is returned to the free list and merged with free neighbors; a
** locked node is only marked freePending. For virtual nodes the paged
** memory is released and the descriptor destroyed; locked virtual nodes
** are marked freed and released later at unlock.
** NOTE(review): interior lines are elided in this view. */
1244 gckKERNEL kernel = gcvNULL;
1245 gckVIDMEM memory = gcvNULL;
1246 gcuVIDMEM_NODE_PTR node;
1247 gctBOOL mutexAcquired = gcvFALSE;
1249 gctBOOL acquired = gcvFALSE;
1250 gctINT32 i, totalLocked;
1252 gcmkHEADER_ARG("Node=0x%x", Node);
1254 /* Verify the arguments. */
1255 if ((Node == gcvNULL)
1256 || (Node->VidMem.memory == gcvNULL)
1259 /* Invalid object. */
1260 gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
1263 /**************************** Video Memory ********************************/
1265 if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
1267 if (Node->VidMem.locked > 0)
1269 /* Client still has a lock, defer free op 'till when lock reaches 0. */
1270 Node->VidMem.freePending = gcvTRUE;
1272 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1273 "Node 0x%x is locked (%d)... deferring free.",
1274 Node, Node->VidMem.locked);
1277 return gcvSTATUS_OK;
1280 /* Extract pointer to gckVIDMEM object owning the node. */
1281 memory = Node->VidMem.memory;
1283 /* Acquire the mutex. */
1285 gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
1287 mutexAcquired = gcvTRUE;
1290 #if !gcdUSE_VIDMEM_PER_PID
1292 Node->VidMem.processID = 0;
1293 Node->VidMem.logical = gcvNULL;
1296 /* Don't try to re-free an already freed node. */
1297 if ((Node->VidMem.nextFree == gcvNULL)
1298 && (Node->VidMem.prevFree == gcvNULL)
1302 #if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
/* Drop any kernel-side mapping of the reserved memory before freeing. */
1303 if (Node->VidMem.kernelVirtual)
1305 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1306 "%s(%d) Unmap %x from kernel space.",
1307 __FUNCTION__, __LINE__,
1308 Node->VidMem.kernelVirtual);
1311 gckOS_UnmapPhysical(memory->os,
1312 Node->VidMem.kernelVirtual,
1313 Node->VidMem.bytes));
1315 Node->VidMem.kernelVirtual = gcvNULL;
1319 /* Check if Node is already freed. */
1320 if (Node->VidMem.nextFree)
1322 /* Node is already freed. */
1323 gcmkONERROR(gcvSTATUS_INVALID_DATA);
1326 /* Update the number of free bytes. */
1327 memory->freeBytes += Node->VidMem.bytes;
1329 /* Find the next free node. */
/* Scan forward in address order for the first node on the free list. */
1330 for (node = Node->VidMem.next;
1331 node != gcvNULL && node->VidMem.nextFree == gcvNULL;
1332 node = node->VidMem.next) ;
1334 /* Insert this node in the free list. */
1335 Node->VidMem.nextFree = node;
1336 Node->VidMem.prevFree = node->VidMem.prevFree;
1338 Node->VidMem.prevFree->VidMem.nextFree =
1339 node->VidMem.prevFree = Node;
1341 /* Is the next node a free node and not the sentinel? */
1342 if ((Node->VidMem.next == Node->VidMem.nextFree)
1343 && (Node->VidMem.next->VidMem.bytes != 0)
1346 /* Merge this node with the next node. */
1347 gcmkONERROR(_Merge(memory->os, node = Node));
1348 gcmkASSERT(node->VidMem.nextFree != node);
1349 gcmkASSERT(node->VidMem.prevFree != node);
1352 /* Is the previous node a free node and not the sentinel? */
1353 if ((Node->VidMem.prev == Node->VidMem.prevFree)
1354 && (Node->VidMem.prev->VidMem.bytes != 0)
1357 /* Merge this node with the previous node. */
1358 gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
1359 gcmkASSERT(node->VidMem.nextFree != node);
1360 gcmkASSERT(node->VidMem.prevFree != node);
1364 /* Release the mutex. */
1365 gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
1367 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1368 "Node 0x%x is freed.",
1373 return gcvSTATUS_OK;
1376 /*************************** Virtual Memory *******************************/
1378 /* Get gckKERNEL object. */
1379 kernel = Node->Virtual.kernel;
1381 /* Verify the gckKERNEL object pointer. */
1382 gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
1384 /* Get the gckOS object pointer. */
1386 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
1388 /* Grab the mutex. */
1390 gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
/* Sum the lock counts across all GPUs; any outstanding lock defers the
** actual free to the final unlock. */
1394 for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
1396 totalLocked += Node->Virtual.lockeds[i];
1399 if (totalLocked > 0)
1401 gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
1402 "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
1406 Node->Virtual.freed = gcvTRUE;
1408 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1412 /* Free the virtual memory. */
1413 gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
1414 Node->Virtual.physical,
1415 Node->Virtual.bytes));
1417 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1419 /* Destroy the gcuVIDMEM_NODE union. */
1420 gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
1425 return gcvSTATUS_OK;
/* Error path: release whichever mutex was taken. */
1430 /* Release the mutex. */
1431 gcmkVERIFY_OK(gckOS_ReleaseMutex(
1432 memory->os, memory->mutex
1438 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1441 /* Return the status. */
1448 /*******************************************************************************
1450 ** gckVIDMEM_FreeHandleMemory
1452 ** Free all allocated video memory nodes for a handle.
1457 ** Pointer to a gckVIDMEM object.
1464 gckVIDMEM_FreeHandleMemory(
1465 IN gckKERNEL Kernel,
1466 IN gckVIDMEM Memory,
/* Free every heap node owned by process Pid: walk each used bank, unlock
** any remaining locks on matching nodes, then free them. Freeing can merge
** nodes and invalidate the walk, so the caller restarts iteration.
** NOTE(review): interior lines are elided in this view. */
1471 gctBOOL mutex = gcvFALSE;
1472 gcuVIDMEM_NODE_PTR node;
1474 gctUINT32 nodeCount = 0, byteCount = 0;
1477 gcmkHEADER_ARG("Kernel=0x%x, Memory=0x%x Pid=0x%u", Kernel, Memory, Pid);
1479 gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
1480 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
1482 gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
1485 /* Walk all sentinels. */
1486 for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
1488 /* Bail out of the heap if it is not used. */
1489 if (Memory->sentinel[i].VidMem.next == gcvNULL)
1498 /* Walk all the nodes until we reach the sentinel. */
1499 for (node = Memory->sentinel[i].VidMem.next;
1500 node->VidMem.bytes != 0;
1501 node = node->VidMem.next)
1503 /* Free the node if it was allocated by Handle. */
1504 if (node->VidMem.processID == Pid)
1506 /* Unlock video memory. */
/* Drain all outstanding locks so the free is not deferred. */
1507 while (node->VidMem.locked > 0)
1509 gckVIDMEM_Unlock(Kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL);
1513 byteCount += node->VidMem.bytes;
1515 /* Free video memory. */
1516 gcmkVERIFY_OK(gckVIDMEM_Free(node));
1519 * Freeing may cause a merge which will invalidate our iteration.
1520 * Don't be clever, just restart.
1526 #if gcdUSE_VIDMEM_PER_PID
1529 gcmkASSERT(node->VidMem.processID == Pid);
1537 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1539 return gcvSTATUS_OK;
/* Error path: drop the heap mutex before returning. */
1544 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1552 /*******************************************************************************
1554 ** _NeedVirtualMapping
** Determine whether a GPU page table mapping must be set up for the video node.
1560 ** Pointer to an gckKERNEL object.
1562 ** gcuVIDMEM_NODE_PTR Node
1563 ** Pointer to a gcuVIDMEM_NODE union.
1566 ** Id of current GPU.
1569 ** gctBOOL * NeedMapping
1570 ** A pointer hold the result whether Node should be mapping.
_NeedVirtualMapping(
    IN gckKERNEL Kernel,
    IN gcuVIDMEM_NODE_PTR Node,
    OUT gctBOOL * NeedMapping
    gcmkHEADER_ARG("Node=0x%X", Node);
    /* Verify the arguments. */
    gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
    gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
    gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
    if (Node->Virtual.contiguous)
        if (Core == gcvCORE_VG)
            /* Contiguous memory on the VG core is addressed directly;
            ** no page-table mapping needed. */
            *NeedMapping = gcvFALSE;
        /* For cores which can't access all physical address. */
        gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
                                               Node->Virtual.logical,
        /* If any part of the region belongs to gcvPOOL_VIRTUAL,
        ** the whole region has to be mapped. Check the last byte:
        ** if the start were virtual the region would already need
        ** mapping, so probing the end covers the split case. */
        end = phys + Node->Virtual.bytes - 1;
        gcmkONERROR(gckHARDWARE_SplitMemory(
            Kernel->hardware, end, &pool, &offset
        *NeedMapping = (pool == gcvPOOL_VIRTUAL);
    /* Non-contiguous memory: a page-table mapping is always required. */
    *NeedMapping = gcvTRUE;
    gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
    return gcvSTATUS_OK;
1633 /*******************************************************************************
1637 ** Lock a video memory node and return its hardware specific address.
1642 ** Pointer to an gckKERNEL object.
1644 ** gcuVIDMEM_NODE_PTR Node
1645 ** Pointer to a gcuVIDMEM_NODE union.
1649 ** gctUINT32 * Address
1650 ** Pointer to a variable that will hold the hardware specific address.
    IN gckKERNEL Kernel,
    IN gcuVIDMEM_NODE_PTR Node,
    IN gctBOOL Cacheable,
    OUT gctUINT32 * Address
    /* Roll-back bookkeeping for the OnError path. */
    gctBOOL acquired = gcvFALSE;
    /* NOTE(review): 'locked' is declared but not visibly set in this
    ** excerpt -- confirm its use against the full source. */
    gctBOOL locked = gcvFALSE;
    gctBOOL needMapping;
    gctUINT32 baseAddress;
    gcmkHEADER_ARG("Node=0x%x", Node);
    /* Verify the arguments. */
    gcmkVERIFY_ARGUMENT(Address != gcvNULL);
    if ((Node == gcvNULL)
    || (Node->VidMem.memory == gcvNULL)
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    /**************************** Video Memory ********************************/
    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        /* Reserved (non-paged) video memory cannot be made cacheable. */
        if (Cacheable == gcvTRUE)
            gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
        /* Increment the lock count. */
        Node->VidMem.locked ++;
        /* Return the physical address of the node. */
#if !gcdUSE_VIDMEM_PER_PID
        *Address = Node->VidMem.memory->baseAddress
                 + Node->VidMem.offset
                 + Node->VidMem.alignment;
        *Address = Node->VidMem.physical;
        /* Get hardware specific address. */
        if (Kernel->vg == gcvNULL)
            if (Kernel->hardware->mmuVersion == 0)
                /* Convert physical to GPU address for old mmu:
                ** GPU addresses are offsets from the OS base address. */
                gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
                gcmkASSERT(*Address > baseAddress);
                *Address -= baseAddress;
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                       "Locked node 0x%x (%d) @ 0x%08X",
                       Node->VidMem.locked,
    /*************************** Virtual Memory *******************************/
        /* Verify the gckKERNEL object pointer. */
        gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
        /* Extract the gckOS object pointer. */
        os = Node->Virtual.kernel->os;
        gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
        /* Grab the mutex protecting this node's virtual state. */
        gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
        /* Lock/map the paged memory into the kernel (logical address and
        ** page count come back for the MMU mapping below). */
                              Node->Virtual.physical,
                              Node->Virtual.bytes,
                              &Node->Virtual.logical,
                              &Node->Virtual.pageCount));
        /* Increment the lock count (post-increment: branch taken on first lock). */
        if (Node->Virtual.lockeds[Kernel->core] ++ == 0)
            /* Is this node pending for a final unlock? */
            if (!Node->Virtual.contiguous && Node->Virtual.unlockPendings[Kernel->core])
                /* Make sure we have a page table. */
                gcmkASSERT(Node->Virtual.pageTables[Kernel->core] != gcvNULL);
                /* Remove pending unlock: re-use the still-live mapping. */
                Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
            /* First lock - create a page table. */
            gcmkASSERT(Node->Virtual.pageTables[Kernel->core] == gcvNULL);
            /* Make sure we mark our node as not flushed. */
            Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
            gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, Node, &needMapping));
            if (needMapping == gcvFALSE)
                /* No MMU mapping needed: get hardware specific address
                ** directly from the logical address. */
                if (Kernel->vg != gcvNULL)
                    gcmkONERROR(gckVGHARDWARE_ConvertLogical(Kernel->vg->hardware,
                                                             Node->Virtual.logical,
                                                             &Node->Virtual.addresses[Kernel->core]));
                    gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
                                                           Node->Virtual.logical,
                                                           &Node->Virtual.addresses[Kernel->core]));
            if (Kernel->vg != gcvNULL)
                /* Allocate pages inside the (VG) MMU. */
                    gckVGMMU_AllocatePages(Kernel->vg->mmu,
                                           Node->Virtual.pageCount,
                                           &Node->Virtual.pageTables[Kernel->core],
                                           &Node->Virtual.addresses[Kernel->core]));
                /* Allocate pages inside the MMU. */
                    gckMMU_AllocatePages(Kernel->mmu,
                                         Node->Virtual.pageCount,
                                         &Node->Virtual.pageTables[Kernel->core],
                                         &Node->Virtual.addresses[Kernel->core]));
            /* Remember which kernel owns this per-core page table. */
            Node->Virtual.lockKernels[Kernel->core] = Kernel;
            /* Map the pages into the just-allocated page table. */
                gckOS_MapPagesEx(os,
                                 Node->Virtual.physical,
                                 Node->Virtual.logical,
                                 Node->Virtual.pageCount,
                                 Node->Virtual.pageTables[Kernel->core]));
                gckOS_MapPagesEx(os,
                                 Node->Virtual.physical,
                                 Node->Virtual.pageCount,
                                 Node->Virtual.pageTables[Kernel->core]));
            /* Flush the MMU so the new entries take effect. */
            if (Kernel->core == gcvCORE_VG)
                gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
                gcmkONERROR(gckMMU_Flush(Kernel->mmu));
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Mapped virtual node 0x%x to 0x%08X",
                           Node->Virtual.addresses[Kernel->core]);
        /* Return hardware address. */
        *Address = Node->Virtual.addresses[Kernel->core];
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    gcmkFOOTER_ARG("*Address=%08x", *Address);
    return gcvSTATUS_OK;
    /* OnError roll-back: undo MMU allocation, page lock and lock count. */
    if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
        if (Kernel->vg != gcvNULL)
            /* Free the pages from the (VG) MMU. */
                gckVGMMU_FreePages(Kernel->vg->mmu,
                                   Node->Virtual.pageTables[Kernel->core],
                                   Node->Virtual.pageCount));
            /* Free the pages from the MMU. */
                gckMMU_FreePages(Kernel->mmu,
                                 Node->Virtual.pageTables[Kernel->core],
                                 Node->Virtual.pageCount));
        Node->Virtual.pageTables[Kernel->core] = gcvNULL;
        Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
    /* Unlock the pages. */
        gckOS_UnlockPages(os,
                          Node->Virtual.physical,
                          Node->Virtual.bytes,
                          Node->Virtual.logical
    /* Undo the lock count increment taken above. */
    Node->Virtual.lockeds[Kernel->core]--;
    /* Release the mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    /* Return the status. */
1908 /*******************************************************************************
1912 ** Unlock a video memory node.
1917 ** Pointer to an gckKERNEL object.
1919 ** gcuVIDMEM_NODE_PTR Node
1920 ** Pointer to a locked gcuVIDMEM_NODE union.
1922 ** gceSURF_TYPE Type
1923 ** Type of surface to unlock.
1925 ** gctBOOL * Asynchroneous
1926 ** Pointer to a variable specifying whether the surface should be
1927 ** unlocked asynchroneously or not.
** gctBOOL * Asynchroneous
** Pointer to a variable receiving gcvTRUE when the unlock must be
** completed asynchronously through a scheduled event, or gcvFALSE
** when the node has been unlocked synchronously.
    IN gckKERNEL Kernel,
    IN gcuVIDMEM_NODE_PTR Node,
    IN gceSURF_TYPE Type,
    IN OUT gctBOOL * Asynchroneous
    gckHARDWARE hardware;
    gctSIZE_T requested, bufferSize;
    gckCOMMAND command = gcvNULL;
    gceKERNEL_FLUSH flush;
    /* Roll-back bookkeeping for the OnError path. */
    gctBOOL acquired = gcvFALSE;
    gctBOOL commitEntered = gcvFALSE;
    gctINT32 i, totalLocked;
    gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
                   Node, Type, gcmOPT_VALUE(Asynchroneous));
    /* Verify the arguments. */
    if ((Node == gcvNULL)
    || (Node->VidMem.memory == gcvNULL)
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    /**************************** Video Memory ********************************/
    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        if (Node->VidMem.locked <= 0)
            /* The surface was not locked. */
            status = gcvSTATUS_MEMORY_UNLOCKED;
        /* Decrement the lock count. */
        Node->VidMem.locked --;
        if (Asynchroneous != gcvNULL)
            /* No need for any events: reserved video memory unlocks synchronously. */
            *Asynchroneous = gcvFALSE;
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                       "Unlocked node 0x%x (%d)",
                       Node->VidMem.locked);
        /* Unmap the video memory once the last lock is gone. */
        if ((Node->VidMem.locked == 0) && (Node->VidMem.logical != gcvNULL))
            if (Kernel->core == gcvCORE_VG)
                gckKERNEL_UnmapVideoMemory(Kernel,
                                           Node->VidMem.logical,
                                           Node->VidMem.processID,
                                           Node->VidMem.bytes);
                Node->VidMem.logical = gcvNULL;
#endif /* __QNXNTO__ */
        if (Node->VidMem.freePending && (Node->VidMem.locked == 0))
            /* Client has unlocked node previously attempted to be freed by compositor. Free now. */
            Node->VidMem.freePending = gcvFALSE;
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Deferred-freeing Node 0x%x.",
            gcmkONERROR(gckVIDMEM_Free(Node));
    /*************************** Virtual Memory *******************************/
        /* Verify the gckHARDWARE object pointer. */
        hardware = Kernel->hardware;
        gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
        /* Verify the gckCOMMAND object pointer. */
        command = Kernel->command;
        gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
        /* Get the gckOS object pointer. */
        gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
        /* Grab the mutex protecting this node's virtual state. */
            gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
        /* Asynchroneous == gcvNULL means this is the second (event-driven)
        ** phase: actually tear down the mapping now. */
        if (Asynchroneous == gcvNULL)
            if (Node->Virtual.lockeds[Kernel->core] == 0)
                status = gcvSTATUS_MEMORY_UNLOCKED;
            /* Decrement lock count. */
            -- Node->Virtual.lockeds[Kernel->core];
            /* See if we can unlock the resources. */
            if (Node->Virtual.lockeds[Kernel->core] == 0)
                /* Free the page table. */
                if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
                    if (Kernel->vg != gcvNULL)
                        gckVGMMU_FreePages(Kernel->vg->mmu,
                                           Node->Virtual.pageTables[Kernel->core],
                                           Node->Virtual.pageCount));
                        gckMMU_FreePages(Kernel->mmu,
                                         Node->Virtual.pageTables[Kernel->core],
                                         Node->Virtual.pageCount));
                    /* Mark page table as freed. */
                    Node->Virtual.pageTables[Kernel->core] = gcvNULL;
                    Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
                /* Mark node as unlocked. */
                Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
            /* Sum locks across every core to detect the last unlock. */
            for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
                totalLocked += Node->Virtual.lockeds[i];
            if (totalLocked == 0)
                /* Owner has already freed this node
                ** and we are the last one to unlock, do
                if (Node->Virtual.freed)
                    /* Free the virtual memory. */
                    gcmkVERIFY_OK(gckOS_FreePagedMemory(Kernel->os,
                                                        Node->Virtual.physical,
                                                        Node->Virtual.bytes));
                    /* Release mutex before node is destroyed */
                    gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
                    acquired = gcvFALSE;
                    /* Destroy the gcuVIDMEM_NODE union. */
                    gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
                    /* Node has been destroyed, so we should not touch it any more */
                    return gcvSTATUS_OK;
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Unmapped virtual node 0x%x from 0x%08X",
                           Node, Node->Virtual.addresses[Kernel->core]);
            /* If we need to unlock a node from virtual memory we have to be
            ** very careful. If the node is still inside the caches we
            ** might get a bus error later if the cache line needs to be
            ** replaced. So - we have to flush the caches before we do
            /* gckCommand_EnterCommit() can't be called in interrupt handler because
            ** of a dead lock situation:
            ** process call Command_Commit(), and acquire Command->mutexQueue in
            ** gckCOMMAND_EnterCommit(). Then it will wait for a signal which depends
            ** on interrupt handler to generate, if interrupt handler enter
            ** gckCommand_EnterCommit(), process will never get the signal. */
            /* So, flush cache when we still in process context, and then ask caller to
            ** schedule a event. */
                gckOS_UnlockPages(os,
                                  Node->Virtual.physical,
                                  Node->Virtual.bytes,
                                  Node->Virtual.logical));
            /* Only flush GPU caches for the last non-contiguous lock on a
            ** non-VG core; choose the cache by surface type. */
            if (!Node->Virtual.contiguous
            &&  (Node->Virtual.lockeds[Kernel->core] == 1)
            &&  (Kernel->vg == gcvNULL)
                if (Type == gcvSURF_BITMAP)
                    /* Flush 2D cache. */
                    flush = gcvFLUSH_2D;
                else if (Type == gcvSURF_RENDER_TARGET)
                    /* Flush color cache. */
                    flush = gcvFLUSH_COLOR;
                else if (Type == gcvSURF_DEPTH)
                    /* Flush depth cache. */
                    flush = gcvFLUSH_DEPTH;
                    /* No flush required. */
                    flush = (gceKERNEL_FLUSH) 0;
                /* First call sizes the flush command (NULL buffer). */
                    gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));
                /* Acquire the command queue. */
                gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
                commitEntered = gcvTRUE;
                gcmkONERROR(gckCOMMAND_Reserve(
                    command, requested, &buffer, &bufferSize
                /* Second call emits the flush into the reserved buffer. */
                gcmkONERROR(gckHARDWARE_Flush(
                    hardware, flush, buffer, &bufferSize
                /* Mark node as pending: the real teardown happens on the
                ** event-driven second call with Asynchroneous == gcvNULL. */
                Node->Virtual.unlockPendings[Kernel->core] = gcvTRUE;
                gcmkONERROR(gckCOMMAND_Execute(command, requested));
                /* Release the command queue. */
                gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
                commitEntered = gcvFALSE;
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Scheduled unlock for virtual node 0x%x",
            /* Schedule the surface to be unlocked. */
            *Asynchroneous = gcvTRUE;
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
        acquired = gcvFALSE;
    gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
    return gcvSTATUS_OK;
    /* OnError roll-back: leave the command queue and node mutex released. */
    /* Release the command queue mutex. */
    gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvFALSE));
    /* Release the mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    /* Return the status. */