1 /****************************************************************************
3 * Copyright (C) 2005 - 2014 by Vivante Corp.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *****************************************************************************/
22 #include "gc_hal_kernel_precomp.h"
23 #include "gc_hal_kernel_buffer.h"
27 #include "gc_hal_kernel_qnx.h"
30 #define _GC_OBJ_ZONE gcvZONE_EVENT
32 #define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
33 #define gcdEVENT_MIN_THRESHOLD 4
35 /******************************************************************************\
36 ********************************* Support Code *********************************
37 \******************************************************************************/
/* gckEVENT_AllocateQueue: pop one gcsEVENT_QUEUE from Event->freeList into
** *Queue.  Fails with gcvSTATUS_OUT_OF_RESOURCES when the repository list
** is exhausted.  NOTE(review): interleaved lines of this excerpt are
** missing (status variable, OnError label, final return) -- the visible
** text is not the complete body. */
40 gckEVENT_AllocateQueue(
42 OUT gcsEVENT_QUEUE_PTR * Queue
47 gcmkHEADER_ARG("Event=0x%x", Event);
49 /* Verify the arguments. */
50 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
51 gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
53 /* Do we have free queues? */
54 if (Event->freeList == gcvNULL)
56 gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
59 /* Move one free queue from the free list. */
60 * Queue = Event->freeList;
61 Event->freeList = Event->freeList->next;
64 gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
68 /* Return the status. */
/* gckEVENT_FreeQueue: return Queue to the head of Event->freeList.
** NOTE(review): the signature line naming this function is elided from
** this excerpt; the name is inferred from the call site in
** gckEVENT_Destroy. */
76 OUT gcsEVENT_QUEUE_PTR Queue
79 gceSTATUS status = gcvSTATUS_OK;
81 gcmkHEADER_ARG("Event=0x%x", Event);
83 /* Verify the arguments. */
84 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
85 gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
87 /* Push the queue back onto the head of the free list. */
88 Queue->next = Event->freeList;
89 Event->freeList = Queue;
/* gckEVENT_FreeRecord: push a single event record back onto
** Event->freeEventList under freeEventMutex, bumping freeEventCount.
** The error path (L62) releases the mutex with gcmkVERIFY_OK -- the
** OnError label between the two release calls is elided here. */
99 IN gcsEVENT_PTR Record
103 gctBOOL acquired = gcvFALSE;
105 gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
107 /* Verify the arguments. */
108 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
109 gcmkVERIFY_ARGUMENT(Record != gcvNULL);
111 /* Acquire the mutex. */
112 gcmkONERROR(gckOS_AcquireMutex(Event->os,
113 Event->freeEventMutex,
117 /* Push the record on the free list. */
118 Record->next = Event->freeEventList;
119 Event->freeEventList = Record;
120 Event->freeEventCount += 1;
122 /* Release the mutex. */
123 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
133 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
136 /* Return the status. */
/* gckEVENT_IsEmpty: report through *IsEmpty whether all hardware event
** slots (Event->queues[]) are idle.  A zero-timeout try-lock on
** eventQueueMutex treats gcvSTATUS_TIMEOUT as "no longer empty" rather
** than an error. */
144 OUT gctBOOL_PTR IsEmpty
150 gcmkHEADER_ARG("Event=0x%x", Event);
152 /* Verify the arguments. */
153 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
154 gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
156 /* Assume the event queue is empty. */
159 /* Walk the event queue. */
160 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
162 /* Check whether this event is in use. */
163 if (Event->queues[i].head != gcvNULL)
165 /* The event is in use, hence the queue is not empty. */
171 /* Try acquiring the mutex. */
172 status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
173 if (status == gcvSTATUS_TIMEOUT)
175 /* Timeout - queue is no longer empty. */
180 /* Bail out on error. */
183 /* Release the mutex. */
184 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
188 gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
192 /* Return the status. */
/* _TryToIdleGPU: if the event queue is empty and the hardware reports
** idle, broadcast gcvBROADCAST_GPU_IDLE so the OS layer can power down.
** powerMutex is try-acquired with a zero timeout; a gcvSTATUS_TIMEOUT
** bails out silently (elided branch after L96) instead of blocking the
** interrupt/notify path. */
203 gctBOOL empty = gcvFALSE, idle = gcvFALSE;
204 gctBOOL powerLocked = gcvFALSE;
205 gckHARDWARE hardware;
207 gcmkHEADER_ARG("Event=0x%x", Event);
209 /* Verify the arguments. */
210 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
212 /* Grab gckHARDWARE object. */
213 hardware = Event->kernel->hardware;
214 gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
216 /* Check whether the event queue is empty. */
217 gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
221 status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
222 if (status == gcvSTATUS_TIMEOUT)
228 powerLocked = gcvTRUE;
230 /* Query whether the hardware is idle. */
231 gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
233 gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
234 powerLocked = gcvFALSE;
238 /* Inform the system of idle GPU. */
239 gcmkONERROR(gckOS_Broadcast(Event->os,
240 Event->kernel->hardware,
241 gcvBROADCAST_GPU_IDLE));
/* OnError: the release below runs only when powerLocked is still set. */
251 gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
/* __RemoveRecordFromProcessDB: walk the linked record list and, for each
** deferred-free command, remove the matching entry from the owning
** process database so teardown bookkeeping stays balanced.  Signal
** records are additionally bound to the hardware here.  Records queued
** by the kernel itself (fromKernel) have no process-DB entry and are
** skipped. */
259 __RemoveRecordFromProcessDB(
261 IN gcsEVENT_PTR Record
264 gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
265 gcmkVERIFY_ARGUMENT(Record != gcvNULL);
267 while (Record != gcvNULL)
269 if (Record->info.command == gcvHAL_SIGNAL)
271 /* TODO: Find a better place to bind signal to hardware.*/
272 gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
273 gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
274 Event->kernel->hardware));
277 if (Record->fromKernel)
279 /* No need to check db if event is from kernel. */
280 Record = Record->next;
284 switch (Record->info.command)
286 case gcvHAL_FREE_NON_PAGED_MEMORY:
287 gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
291 gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
294 case gcvHAL_FREE_CONTIGUOUS_MEMORY:
295 gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
299 gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
302 case gcvHAL_UNLOCK_VIDEO_MEMORY:
303 gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
306 gcvDB_VIDEO_MEMORY_LOCKED,
307 gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
310 case gcvHAL_UNMAP_USER_MEMORY:
311 gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
314 gcvDB_MAP_USER_MEMORY,
315 gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
318 case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
319 gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
322 gcvDB_COMMAND_BUFFER,
323 gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
330 Record = Record->next;
/* _ReleaseVideoMemoryHandle: for an UNLOCK_VIDEO_MEMORY event, resolve
** the caller's 32-bit video-memory handle into its gckVIDMEM_NODE
** object, stash the object pointer back into the record (so the event
** can complete after the owning process exits), and drop the per-process
** handle reference.  Must run while the handle still belongs to the
** current process -- see the FromKernel == gcvFALSE call site in
** gckEVENT_AddList. */
337 _ReleaseVideoMemoryHandle(
339 IN OUT gcsEVENT_PTR Record,
340 IN OUT gcsHAL_INTERFACE * Interface
344 gckVIDMEM_NODE nodeObject;
347 switch(Interface->command)
349 case gcvHAL_UNLOCK_VIDEO_MEMORY:
350 handle = (gctUINT32)Interface->u.UnlockVideoMemory.node;
352 gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
353 Kernel, Record->processID, handle, &nodeObject));
355 Record->info.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(nodeObject);
357 gckVIDMEM_HANDLE_Dereference(Kernel, Record->processID, handle);
369 /*******************************************************************************
373 ** Check the type of surfaces which will be released by current event and
374 ** determine the cache needed to flush.
**
**  Walks the record list, OR-ing per-surface-type flush bits into *Flush.
**  An UNMAP_USER_MEMORY record forces gcvFLUSH_ALL (memory of unknown use
**  is being released).  NOTE(review): break statements between the case
**  labels are elided from this excerpt; the original does not fall
**  through between surface types.
380 IN gcsEVENT_PTR Record,
381 OUT gceKERNEL_FLUSH *Flush
384 gceKERNEL_FLUSH flush = 0;
385 gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
386 gcmkVERIFY_ARGUMENT(Record != gcvNULL);
388 while (Record != gcvNULL)
390 switch (Record->info.command)
392 case gcvHAL_UNLOCK_VIDEO_MEMORY:
393 switch(Record->info.u.UnlockVideoMemory.type)
395 case gcvSURF_TILE_STATUS:
396 flush |= gcvFLUSH_TILE_STATUS;
398 case gcvSURF_RENDER_TARGET:
399 flush |= gcvFLUSH_COLOR;
402 flush |= gcvFLUSH_DEPTH;
404 case gcvSURF_TEXTURE:
405 flush |= gcvFLUSH_TEXTURE;
407 case gcvSURF_TYPE_UNKNOWN:
414 case gcvHAL_UNMAP_USER_MEMORY:
415 *Flush = gcvFLUSH_ALL;
422 Record = Record->next;
/* _SubmitTimerFunction: timer callback that force-submits any queued
** events (Wait = gcvTRUE, FromPower = gcvFALSE).  The two adjacent
** gckEVENT_Submit calls are the gcdMULTI_GPU and single-GPU variants;
** the surrounding #if/#else preprocessor lines are elided from this
** excerpt -- only one call is compiled. */
432 _SubmitTimerFunction(
436 gckEVENT event = (gckEVENT)Data;
438 gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
440 gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
444 /******************************************************************************\
445 ******************************* gckEVENT API Code *******************************
446 \******************************************************************************/
448 /*******************************************************************************
450 **  gckEVENT_Construct
452 **  Construct a new gckEVENT object.
**
**  Allocates and zeroes the object, creates its three mutexes
**  (eventQueueMutex / freeEventMutex / eventListMutex), pre-allocates
**  gcdEVENT_ALLOCATION_COUNT event records onto the free list, chains
**  the repoList queues onto freeList, constructs the pending atoms and
**  the deferred-submit timer.  The roll-back path (from L235) undoes
**  whatever was created before the failure.
457 **      Pointer to an gckKERNEL object.
462 **      Pointer to a variable that receives the gckEVENT object pointer.
472 gckEVENT eventObj = gcvNULL;
475 gctPOINTER pointer = gcvNULL;
477 gcmkHEADER_ARG("Kernel=0x%x", Kernel);
479 /* Verify the arguments. */
480 gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
481 gcmkVERIFY_ARGUMENT(Event != gcvNULL);
483 /* Extract the pointer to the gckOS object. */
485 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
487 /* Allocate the gckEVENT object. */
488 gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
492 /* Reset the object. */
493 gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
495 /* Initialize the gckEVENT object. */
496 eventObj->object.type = gcvOBJ_EVENT;
497 eventObj->kernel = Kernel;
500 /* Create the mutexes. */
501 gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
502 gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
503 gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
505 /* Create a bunch of event records. */
506 for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
508 /* Allocate an event record. */
509 gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
513 /* Push it on the free list. */
514 record->next = eventObj->freeEventList;
515 eventObj->freeEventList = record;
516 eventObj->freeEventCount += 1;
519 /* Initialize the free list of event queues. */
520 for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
522 eventObj->repoList[i].next = eventObj->freeList;
523 eventObj->freeList = &eventObj->repoList[i];
526 /* Construct the atom. */
527 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
528 gcmkONERROR(gckOS_AtomSet(os,
530 gcmCOUNTOF(eventObj->queues)));
533 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
536 for (i = 0; i < gcdMULTI_GPU; i++)
538 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3D[i]));
539 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3DMask[i]));
542 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pendingMask));
547 gcmkVERIFY_OK(gckOS_CreateTimer(os,
548 _SubmitTimerFunction,
549 (gctPOINTER)eventObj,
550 &eventObj->submitTimer));
552 #if gcdINTERRUPT_STATISTIC
553 gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->interruptCount));
554 gcmkONERROR(gckOS_AtomSet(os,eventObj->interruptCount, 0));
557 /* Return pointer to the gckEVENT object. */
561 gcmkFOOTER_ARG("*Event=0x%x", *Event);
/* OnError: tear down everything constructed so far, in reverse order. */
566 if (eventObj != gcvNULL)
568 if (eventObj->eventQueueMutex != gcvNULL)
570 gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
573 if (eventObj->freeEventMutex != gcvNULL)
575 gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
578 if (eventObj->eventListMutex != gcvNULL)
580 gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
583 while (eventObj->freeEventList != gcvNULL)
585 record = eventObj->freeEventList;
586 eventObj->freeEventList = record->next;
588 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
591 if (eventObj->freeAtom != gcvNULL)
593 gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
597 if (eventObj->pending != gcvNULL)
599 gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
603 for (i = 0; i < gcdMULTI_GPU; i++)
605 if (eventObj->pending3D[i] != gcvNULL)
607 gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3D[i]));
610 if (eventObj->pending3DMask[i] != gcvNULL)
612 gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3DMask[i]));
618 #if gcdINTERRUPT_STATISTIC
619 if (eventObj->interruptCount)
621 gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->interruptCount));
624 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
627 /* Return the status. */
632 /*******************************************************************************
636 **  Destroy an gckEVENT object.
**
**  Stops the submit timer, drains and frees the free-record list, then
**  frees every still-pending queue (warning about records the GPU never
**  signalled), deletes all three mutexes and the atoms, and finally
**  frees the object itself.  Caller must guarantee no further use of
**  Event after this returns.
641 **      Pointer to an gckEVENT object.
653 gcsEVENT_QUEUE_PTR queue;
655 gcmkHEADER_ARG("Event=0x%x", Event);
657 /* Verify the arguments. */
658 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
660 if (Event->submitTimer != gcvNULL)
662 gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
663 gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
666 /* Delete the queue mutex. */
667 gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
669 /* Free all free events. */
670 while (Event->freeEventList != gcvNULL)
672 record = Event->freeEventList;
673 Event->freeEventList = record->next;
675 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
678 /* Delete the free mutex. */
679 gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
681 /* Free all pending queues. */
682 while (Event->queueHead != gcvNULL)
684 /* Get the current queue. */
685 queue = Event->queueHead;
687 /* Free all pending events. */
688 while (queue->head != gcvNULL)
690 record = queue->head;
691 queue->head = record->next;
694 gcvLEVEL_WARNING, gcvZONE_EVENT,
695 gcmSIZEOF(record) + gcmSIZEOF(queue->source),
696 "Event record 0x%x is still pending for %d.",
697 record, queue->source
700 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
703 /* Remove the top queue from the list. */
704 if (Event->queueHead == Event->queueTail)
707 Event->queueTail = gcvNULL;
711 Event->queueHead = Event->queueHead->next;
714 /* Free the queue. */
715 gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
718 /* Delete the list mutex. */
719 gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
721 /* Delete the atom. */
722 gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
725 gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
730 for (i = 0; i < gcdMULTI_GPU; i++)
732 gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3D[i]));
733 gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3DMask[i]));
739 #if gcdINTERRUPT_STATISTIC
740 gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->interruptCount));
743 /* Mark the gckEVENT object as unknown. */
744 Event->object.type = gcvOBJ_UNKNOWN;
746 /* Free the gckEVENT object. */
747 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
754 /*******************************************************************************
758 **  Reserve the next available hardware event.
**
**  Scans Event->queues[] round-robin starting after lastID for a slot
**  with a NULL head; on success marks the slot with
**  gcdINVALID_EVENT_PTR, stamps it, records its source/chip mask, sets
**  the per-GPU pending masks, and decrements the free-slot atom
**  (hurrying the GPU when the count drops below
**  gcdDYNAMIC_EVENT_THRESHOLD).  When no slot is free: hurry the GPU,
**  release the mutex and either fail (Wait == 0) or sleep 1 ms and
**  retry (the enclosing retry loop is elided from this excerpt).
**  The two parameter lists at L322-L326 are the gcdMULTI_GPU and
**  single-GPU prototypes; the #if/#else lines are elided.
763 **      Pointer to an gckEVENT object.
766 **          Set to gcvTRUE to force the function to wait if no events are
767 **          immediately available.
769 **      gceKERNEL_WHERE Source
770 **          Source of the event.
774 **      gctUINT8 * EventID
775 **          Reserved event ID.
777 #define gcdINVALID_EVENT_PTR    ((gcsEVENT_PTR)gcvMAXUINTPTR_T)
784 OUT gctUINT8 * EventID,
785 IN gceKERNEL_WHERE Source,
786 IN gceCORE_3D_MASK ChipEnable
793 OUT gctUINT8 * EventID,
794 IN gceKERNEL_WHERE Source
800 gctBOOL acquired = gcvFALSE;
806 gcmkHEADER_ARG("Event=0x%x Source=%d", Event, Source);
810 /* Grab the queue mutex. */
811 gcmkONERROR(gckOS_AcquireMutex(Event->os,
812 Event->eventQueueMutex,
816 /* Walk through all events. */
818 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
820 gctINT nextID = gckMATH_ModuloInt((id + 1),
821 gcmCOUNTOF(Event->queues));
823 if (Event->queues[id].head == gcvNULL)
825 *EventID = (gctUINT8) id;
827 Event->lastID = (gctUINT8) nextID;
829 /* Save time stamp of event. */
830 Event->queues[id].head = gcdINVALID_EVENT_PTR;
831 Event->queues[id].stamp = ++(Event->stamp);
832 Event->queues[id].source = Source;
835 Event->queues[id].chipEnable = ChipEnable;
837 if (ChipEnable == gcvCORE_3D_ALL_MASK)
839 gckOS_AtomSetMask(Event->pendingMask, (1 << id));
841 for (j = 0; j < gcdMULTI_GPU; j++)
843 gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
848 for (j = 0; j < gcdMULTI_GPU; j++)
850 if (ChipEnable & (1 << j))
852 gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
858 gcmkONERROR(gckOS_AtomDecrement(Event->os,
862 if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
864 gcmkONERROR(gckOS_BroadcastHurry(
866 Event->kernel->hardware,
867 gcdDYNAMIC_EVENT_THRESHOLD - free));
871 /* Release the queue mutex. */
872 gcmkONERROR(gckOS_ReleaseMutex(Event->os,
873 Event->eventQueueMutex));
877 gcvLEVEL_INFO, gcvZONE_EVENT,
883 gcmkFOOTER_ARG("*EventID=%u", *EventID);
891 /* No free events, speed up the GPU right now! */
892 gcmkONERROR(gckOS_BroadcastHurry(Event->os,
893 Event->kernel->hardware,
894 gcdDYNAMIC_EVENT_THRESHOLD));
897 /* Release the queue mutex. */
898 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
901 /* Fail if wait is not requested. */
904 /* Out of resources. */
905 gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
909 gcmkONERROR(gckOS_Delay(Event->os, 1));
915 /* Release the queue mutex. */
916 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
919 /* Return the status. */
924 /*******************************************************************************
926 **  gckEVENT_AllocateRecord
928 **  Allocate a record for the new event.
**
**  Pops one gcsEVENT from Event->freeEventList under freeEventMutex.
**  When AllocateAllowed and the pool is below gcdEVENT_MIN_THRESHOLD
**  (or the pool is empty regardless), first grows the pool by
**  gcdEVENT_ALLOCATION_COUNT records.
933 **      Pointer to an gckEVENT object.
935 **      gctBOOL AllocateAllowed
936 **          State for allocation if out of free events.
940 **      gcsEVENT_PTR * Record
941 **          Allocated event record.
944 gckEVENT_AllocateRecord(
946 IN gctBOOL AllocateAllowed,
947 OUT gcsEVENT_PTR * Record
951 gctBOOL acquired = gcvFALSE;
954 gctPOINTER pointer = gcvNULL;
956 gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
958 /* Verify the arguments. */
959 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
960 gcmkVERIFY_ARGUMENT(Record != gcvNULL);
962 /* Acquire the mutex. */
963 gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
966 /* Test if we are below the allocation threshold. */
967 if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
968 (Event->freeEventCount == 0) )
970 /* Allocate a bunch of records. */
971 for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
973 /* Allocate an event record. */
974 gcmkONERROR(gckOS_Allocate(Event->os,
980 /* Push it on the free list. */
981 record->next = Event->freeEventList;
982 Event->freeEventList = record;
983 Event->freeEventCount += 1;
987 *Record = Event->freeEventList;
988 Event->freeEventList = Event->freeEventList->next;
989 Event->freeEventCount -= 1;
991 /* Release the mutex. */
992 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
995 gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
/* OnError: drop the mutex if it was acquired before the failure. */
1002 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
1005 /* Return the status. */
1010 /*******************************************************************************
1014 **  Add a new event to the list of events.
**
**  Validates the command and source, allocates a record, copies the
**  interface into it, removes the matching process-DB entry, releases
**  any video-memory handle owned by the calling process, then appends
**  the record to the tail queue (allocating a new queue when the tail's
**  source ordering requires one).  Finally, for deferred-free commands,
**  unmaps the user-space logical mapping up front because post-3.5
**  Linux kernels cannot unmap another process's memory at event time.
1019 **      Pointer to an gckEVENT object.
1021 **      gcsHAL_INTERFACE_PTR Interface
1022 **          Pointer to the interface for the event to be added.
1024 **      gceKERNEL_WHERE FromWhere
1025 **          Place in the pipe where the event needs to be generated.
1027 **      gctBOOL AllocateAllowed
1028 **          State for allocation if out of free events.
1037 IN gcsHAL_INTERFACE_PTR Interface,
1038 IN gceKERNEL_WHERE FromWhere,
1039 IN gctBOOL AllocateAllowed,
1040 IN gctBOOL FromKernel
1044 gctBOOL acquired = gcvFALSE;
1045 gcsEVENT_PTR record = gcvNULL;
1046 gcsEVENT_QUEUE_PTR queue;
1047 gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
1048 gckKERNEL kernel = Event->kernel;
1050 gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
1053 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
1054 "FromWhere=%d AllocateAllowed=%d",
1055 FromWhere, AllocateAllowed);
1057 /* Verify the arguments. */
1058 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1059 gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
1061 /* Verify the event command. */
1063 (  (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
1064 || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
1065 || (Interface->command == gcvHAL_WRITE_DATA)
1066 || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
1067 || (Interface->command == gcvHAL_SIGNAL)
1068 || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
1069 || (Interface->command == gcvHAL_TIMESTAMP)
1070 || (Interface->command == gcvHAL_COMMIT_DONE)
1071 || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
1072 || (Interface->command == gcvHAL_SYNC_POINT)
1073 || (Interface->command == gcvHAL_DESTROY_MMU)
1076 /* Validate the source. */
1077 if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
1079 /* Invalid argument. */
1080 gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
1083 /* Allocate a free record. */
1084 gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
1086 /* Terminate the record. */
1087 record->next = gcvNULL;
1089 /* Record the committer. */
1090 record->fromKernel = FromKernel;
1092 /* Copy the event interface into the record. */
1093 gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
1095 /* Get process ID. */
1096 gcmkONERROR(gckOS_GetProcessID(&record->processID));
1098 gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
1100 /* Handle is belonged to current process, it must be released now. */
1101 if (FromKernel == gcvFALSE)
1103 status = _ReleaseVideoMemoryHandle(Event->kernel, record, Interface);
1105 if (gcmIS_ERROR(status))
1107 /* Ignore error because there are other events in the queue. */
1108 status = gcvSTATUS_OK;
1114 record->kernel = Event->kernel;
1117 /* Acquire the mutex. */
1118 gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
1121 /* Do we need to allocate a new queue? */
1122 if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
1124 /* Allocate a new queue. */
1125 gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
1127 /* Initialize the queue. */
1128 queue->source = FromWhere;
1129 queue->head   = gcvNULL;
1130 queue->next   = gcvNULL;
1132 /* Attach it to the list of allocated queues. */
1133 if (Event->queueTail == gcvNULL)
1136 Event->queueTail = queue;
1140 Event->queueTail->next = queue;
1141 Event->queueTail       = queue;
1146 queue = Event->queueTail;
1149 /* Attach the record to the queue. */
1150 if (queue->head == gcvNULL)
1152 queue->head = record;
1153 queue->tail = record;
1157 queue->tail->next = record;
1158 queue->tail       = record;
1161 /* Unmap user space logical address.
1162  * Linux kernel does not support unmap the memory of other process any more since 3.5.
1163  * Let's unmap memory of self process before submit the event to gpu.
1165 switch(Interface->command)
1167 case gcvHAL_FREE_NON_PAGED_MEMORY:
1168 gcmkONERROR(gckOS_UnmapUserLogical(
1170 gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
1171 (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
1172 gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
1174 case gcvHAL_FREE_CONTIGUOUS_MEMORY:
1175 gcmkONERROR(gckOS_UnmapUserLogical(
1177 gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
1178 (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
1179 gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
1182 case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
1183 buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical);
1184 if (buffer->userLogical)
1186 gcmkONERROR(gckOS_DestroyUserVirtualMapping(
1189 (gctSIZE_T) Interface->u.FreeVirtualCommandBuffer.bytes,
1190 gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
1198 /* Release the mutex. */
1199 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
1203 return gcvSTATUS_OK;
/* OnError: release the list mutex if held, then recycle the record. */
1209 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
1212 if (record != gcvNULL)
1214 gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
1217 /* Return the status. */
1222 /*******************************************************************************
1226 **  Schedule an event to unlock virtual memory.
**
**  Builds a gcvHAL_UNLOCK_VIDEO_MEMORY interface for Node/Type and
**  queues it via gckEVENT_AddList (AllocateAllowed = gcvFALSE,
**  FromKernel = gcvTRUE).
1231 **      Pointer to an gckEVENT object.
1233 **      gceKERNEL_WHERE FromWhere
1234 **          Place in the pipe where the event needs to be generated.
1236 **      gcuVIDMEM_NODE_PTR Node
1237 **          Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
1240 **      gceSURF_TYPE Type
1241 **          Type of surface to unlock.
1250 IN gceKERNEL_WHERE FromWhere,
1252 IN gceSURF_TYPE Type
1256 gcsHAL_INTERFACE iface;
1258 gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
1259 Event, FromWhere, Node, Type);
1261 /* Verify the arguments. */
1262 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1263 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
1265 /* Mark the event as an unlock. */
1266 iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
1267 iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
1268 iface.u.UnlockVideoMemory.type = Type;
1269 iface.u.UnlockVideoMemory.asynchroneous = 0;
1271 /* Append it to the queue. */
1272 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1276 return gcvSTATUS_OK;
1279 /* Return the status. */
1284 /*******************************************************************************
1286 **  gckEVENT_FreeNonPagedMemory
1288 **  Schedule an event to free non-paged memory.
**
**  The Physical pointer is converted to a name (gcmPTR_TO_NAME) so the
**  deferred free can resolve it later; the actual free happens when the
**  GPU signals the event.
1293 **      Pointer to an gckEVENT object.
1296 **          Number of bytes of non-paged memory to free.
1298 **      gctPHYS_ADDR Physical
1299 **          Physical address of non-paged memory to free.
1301 **      gctPOINTER Logical
1302 **          Logical address of non-paged memory to free.
1304 **      gceKERNEL_WHERE FromWhere
1305 **          Place in the pipe where the event needs to be generated.
1308 gckEVENT_FreeNonPagedMemory(
1311 IN gctPHYS_ADDR Physical,
1312 IN gctPOINTER Logical,
1313 IN gceKERNEL_WHERE FromWhere
1317 gcsHAL_INTERFACE iface;
1318 gckKERNEL kernel = Event->kernel;
1320 gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
1322 Event, Bytes, Physical, Logical, FromWhere);
1324 /* Verify the arguments. */
1325 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1326 gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
1327 gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
1328 gcmkVERIFY_ARGUMENT(Bytes > 0);
1330 /* Create an event. */
1331 iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
1332 iface.u.FreeNonPagedMemory.bytes    = Bytes;
1333 iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
1334 iface.u.FreeNonPagedMemory.logical  = gcmPTR_TO_UINT64(Logical);
1336 /* Append it to the queue. */
1337 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1341 return gcvSTATUS_OK;
1344 /* Return the status. */
/* gckEVENT_DestroyVirtualCommandBuffer: schedule deferred destruction of
** a virtual command buffer once the GPU has retired it.  Mirrors
** gckEVENT_FreeNonPagedMemory: the physical handle is name-encoded and
** the event is queued kernel-side via gckEVENT_AddList. */
1350 gckEVENT_DestroyVirtualCommandBuffer(
1353 IN gctPHYS_ADDR Physical,
1354 IN gctPOINTER Logical,
1355 IN gceKERNEL_WHERE FromWhere
1359 gcsHAL_INTERFACE iface;
1360 gckKERNEL kernel = Event->kernel;
1362 gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
1364 Event, Bytes, Physical, Logical, FromWhere);
1366 /* Verify the arguments. */
1367 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1368 gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
1369 gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
1370 gcmkVERIFY_ARGUMENT(Bytes > 0);
1372 /* Create an event. */
1373 iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
1374 iface.u.FreeVirtualCommandBuffer.bytes    = Bytes;
1375 iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
1376 iface.u.FreeVirtualCommandBuffer.logical  = gcmPTR_TO_UINT64(Logical);
1378 /* Append it to the queue. */
1379 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1383 return gcvSTATUS_OK;
1386 /* Return the status. */
1391 /*******************************************************************************
1393 **  gckEVENT_FreeContiguousMemory
1395 **  Schedule an event to free contiguous memory.
**
**  Same deferred-free pattern as gckEVENT_FreeNonPagedMemory, using the
**  gcvHAL_FREE_CONTIGUOUS_MEMORY command.
1400 **      Pointer to an gckEVENT object.
1403 **          Number of bytes of contiguous memory to free.
1405 **      gctPHYS_ADDR Physical
1406 **          Physical address of contiguous memory to free.
1408 **      gctPOINTER Logical
1409 **          Logical address of contiguous memory to free.
1411 **      gceKERNEL_WHERE FromWhere
1412 **          Place in the pipe where the event needs to be generated.
1415 gckEVENT_FreeContiguousMemory(
1418 IN gctPHYS_ADDR Physical,
1419 IN gctPOINTER Logical,
1420 IN gceKERNEL_WHERE FromWhere
1424 gcsHAL_INTERFACE iface;
1425 gckKERNEL kernel = Event->kernel;
1427 gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
1429 Event, Bytes, Physical, Logical, FromWhere);
1431 /* Verify the arguments. */
1432 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1433 gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
1434 gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
1435 gcmkVERIFY_ARGUMENT(Bytes > 0);
1437 /* Create an event. */
1438 iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
1439 iface.u.FreeContiguousMemory.bytes    = Bytes;
1440 iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
1441 iface.u.FreeContiguousMemory.logical  = gcmPTR_TO_UINT64(Logical);
1443 /* Append it to the queue. */
1444 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1448 return gcvSTATUS_OK;
1451 /* Return the status. */
1456 /*******************************************************************************
1460 **  Schedule an event to trigger a signal.
**
**  Queues a gcvHAL_SIGNAL interface for Signal.  The coid/rcvid fields
**  at L651-L652 are the QNX variant of the signal payload; the
**  auxSignal/process fields at L653-L654 are the generic variant -- the
**  #if guards selecting between them are elided from this excerpt.
1465 **      Pointer to an gckEVENT object.
1468 **          Pointer to the signal to trigger.
1470 **      gceKERNEL_WHERE FromWhere
1471 **          Place in the pipe where the event needs to be generated.
1480 IN gctSIGNAL Signal,
1481 IN gceKERNEL_WHERE FromWhere
1485 gcsHAL_INTERFACE iface;
1487 gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
1488 Event, Signal, FromWhere);
1490 /* Verify the arguments. */
1491 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1492 gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
1494 /* Mark the event as a signal. */
1495 iface.command = gcvHAL_SIGNAL;
1496 iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
1498 iface.u.Signal.coid  = 0;
1499 iface.u.Signal.rcvid = 0;
1501 iface.u.Signal.auxSignal = 0;
1502 iface.u.Signal.process = 0;
1504 /* Append it to the queue. */
1505 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1509 return gcvSTATUS_OK;
1512 /* Return the status. */
1517 /*******************************************************************************
1519 **  gckEVENT_CommitDone
1521 **  Schedule an event to wake up work thread when commit is done by GPU.
**
**  Queues a payload-less gcvHAL_COMMIT_DONE interface kernel-side.
1526 **      Pointer to an gckEVENT object.
1528 **      gceKERNEL_WHERE FromWhere
1529 **          Place in the pipe where the event needs to be generated.
1536 gckEVENT_CommitDone(
1538 IN gceKERNEL_WHERE FromWhere
1542 gcsHAL_INTERFACE iface;
1544 gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
1546 /* Verify the arguments. */
1547 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1549 iface.command = gcvHAL_COMMIT_DONE;
1551 /* Append it to the queue. */
1552 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1556 return gcvSTATUS_OK;
1559 /* Return the status. */
1564 #if gcdPROCESS_ADDRESS_SPACE
/* gckEVENT_DestroyMmu: (per-process address-space builds only) schedule
** deferred destruction of an MMU object once the GPU no longer
** references it, via a gcvHAL_DESTROY_MMU event. */
1566 gckEVENT_DestroyMmu(
1569 IN gceKERNEL_WHERE FromWhere
1573 gcsHAL_INTERFACE iface;
1575 gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
1577 /* Verify the arguments. */
1578 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1580 iface.command = gcvHAL_DESTROY_MMU;
1581 iface.u.DestroyMmu.mmu = gcmPTR_TO_UINT64(Mmu);
1583 /* Append it to the queue. */
1584 gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
1588 return gcvSTATUS_OK;
1591 /* Return the status. */
1597 /*******************************************************************************
1601 ** Submit the current event queue to the GPU.
1606 ** Pointer to an gckEVENT object.
1609 ** Submit requires one vacant event; if Wait is set to not zero,
1610 ** and there are no vacant events at this time, the function will
1611 ** wait until an event becomes vacant so that submission of the
1612 ** queue is successful.
1614 ** gctBOOL FromPower
1615 ** Determines whether the call originates from inside the power
1616 ** management or not.
/* gckEVENT_Submit: move every accumulated event queue into a hardware event
 * slot and emit the corresponding FLUSH + EVENT commands into the command
 * queue.
 * NOTE(review): this is an elided extraction. The two signature fragments
 * below (with and without ChipEnable) are the two branches of a gcdMULTI_GPU
 * preprocessor conditional whose #if/#else/#endif lines were dropped; the
 * same applies to the paired gckEVENT_GetEvent calls and the ChipEnable
 * bracketing further down. The Wait parameter, OnError label, closing braces
 * and footer are likewise missing from this view. */
1627 IN gctBOOL FromPower,
1628 IN gceCORE_3D_MASK ChipEnable
1635 IN gctBOOL FromPower
1641 gcsEVENT_QUEUE_PTR queue;
1642 gctBOOL acquired = gcvFALSE;
1643 gckCOMMAND command = gcvNULL;
1644 gctBOOL commitEntered = gcvFALSE;
1651 gctSIZE_T chipEnableBytes;
1654 #if gcdINTERRUPT_STATISTIC
1659 gctPOINTER reservedBuffer;
1662 gctUINT32 flushBytes;
1663 gctUINT32 executeBytes;
1664 gckHARDWARE hardware;
1666 gceKERNEL_FLUSH flush = gcvFALSE;
1668 gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
1670 /* Get gckCOMMAND object. */
1671 command = Event->kernel->command;
1672 hardware = Event->kernel->hardware;
1674 gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
/* Timestamp of the last commit; presumably read by idle/timeout logic
 * elsewhere — TODO confirm against the full file. */
1676 gckOS_GetTicks(&Event->lastCommitStamp);
1678 /* Are there event queues? */
1679 if (Event->queueHead != gcvNULL)
1681 /* Acquire the command queue. */
1682 gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
1683 commitEntered = gcvTRUE;
1685 /* Process all queues. */
1686 while (Event->queueHead != gcvNULL)
1688 /* Acquire the list mutex. */
1689 gcmkONERROR(gckOS_AcquireMutex(Event->os,
1690 Event->eventListMutex,
1694 /* Get the current queue. */
1695 queue = Event->queueHead;
1697 /* Allocate an event ID. */
/* NOTE(review): the next two calls are alternative gcdMULTI_GPU branches,
 * not sequential calls — the directives between them were elided. */
1699 gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source, ChipEnable));
1701 gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source));
1704 /* Copy event list to event ID queue. */
1705 Event->queues[id].head = queue->head;
1707 /* Remove the top queue from the list. */
1708 if (Event->queueHead == Event->queueTail)
1710 Event->queueHead = gcvNULL;
1711 Event->queueTail = gcvNULL;
1715 Event->queueHead = Event->queueHead->next;
1718 /* Free the queue. */
1719 gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
1721 /* Release the list mutex. */
1722 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
1723 acquired = gcvFALSE;
1725 /* Determine cache needed to flush. */
1726 gcmkVERIFY_OK(_QueryFlush(Event, Event->queues[id].head, &flush));
1728 #if gcdINTERRUPT_STATISTIC
1729 gcmkVERIFY_OK(gckOS_AtomIncrement(
1731 Event->interruptCount,
1737 /* Notify immediately on infinite hardware. */
1738 gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
1740 gcmkONERROR(gckEVENT_Notify(Event, 0));
/* Size-query pass: ask each command generator for its byte count before
 * reserving command-queue space. */
1742 /* Get the size of the hardware event. */
1743 gcmkONERROR(gckHARDWARE_Event(
1747 Event->queues[id].source,
1751 /* Get the size of flush command. */
1752 gcmkONERROR(gckHARDWARE_Flush(
1759 bytes += flushBytes;
1762 gcmkONERROR(gckHARDWARE_ChipEnable(
/* Two chip-enable brackets: one before and one after the payload. */
1769 bytes += chipEnableBytes * 2;
1772 /* Total bytes need to execute. */
1773 executeBytes = bytes;
1775 /* Reserve space in the command queue. */
1776 gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &bytes));
1778 reservedBuffer = buffer;
/* Generation pass: emit chip-enable, flush and event into the reserved
 * buffer, advancing the cursor by each command's size. */
1782 gcmkONERROR(gckHARDWARE_ChipEnable(
1789 buffer = (gctUINT8_PTR)buffer + chipEnableBytes;
1792 /* Set the flush in the command queue. */
1793 gcmkONERROR(gckHARDWARE_Flush(
1800 /* Advance to next command. */
1801 buffer = (gctUINT8_PTR)buffer + flushBytes;
1803 /* Set the hardware event in the command queue. */
1804 gcmkONERROR(gckHARDWARE_Event(
1808 Event->queues[id].source,
1812 /* Advance to next command. */
1813 buffer = (gctUINT8_PTR)buffer + bytes;
1816 gcmkONERROR(gckHARDWARE_ChipEnable(
1819 gcvCORE_3D_ALL_MASK,
1825 gckKERNEL_SecurityExecute(
1831 /* Execute the hardware event. */
1832 gcmkONERROR(gckCOMMAND_Execute(command, executeBytes));
1837 /* Release the command queue. */
1838 gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
1841 gcmkVERIFY_OK(_TryToIdleGPU(Event));
1847 return gcvSTATUS_OK;
/* Error path (OnError label elided): unwind whatever was acquired, in
 * reverse order, guarded by the acquired/commitEntered flags above. */
1852 /* Need to unroll the mutex acquire. */
1853 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
1858 /* Release the command queue mutex. */
1859 gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
1864 /* Need to unroll the event allocation. */
1865 Event->queues[id].head = gcvNULL;
1868 if (status == gcvSTATUS_GPU_NOT_RESPONDING)
1870 /* Broadcast GPU stuck. */
1871 status = gckOS_Broadcast(Event->os,
1872 Event->kernel->hardware,
1873 gcvBROADCAST_GPU_STUCK);
1876 /* Return the status. */
1881 /*******************************************************************************
1885 ** Commit an event queue from the user.
1890 ** Pointer to an gckEVENT object.
1892 ** gcsQUEUE_PTR Queue
1893 ** User event queue.
/* gckEVENT_Commit: walk a user-space linked list of gcsQUEUE records, append
 * each record's interface to the kernel event list, then submit the list.
 * Depending on gckOS_QueryNeedCopy, records are either copied into a stack
 * buffer or mapped into kernel space and unmapped afterwards.
 * NOTE(review): elided extraction — the two signature fragments and the two
 * gckEVENT_Submit calls below are alternative gcdMULTI_GPU branches with the
 * preprocessor directives dropped; the stack-record declaration, OnError
 * label and footer are also missing from this view. */
1903 IN gcsQUEUE_PTR Queue,
1904 IN gceCORE_3D_MASK ChipEnable
1910 IN gcsQUEUE_PTR Queue
1915 gcsQUEUE_PTR record = gcvNULL, next;
1916 gctUINT32 processID;
1917 gctBOOL needCopy = gcvFALSE;
1919 gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
1921 /* Verify the arguments. */
1922 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
1924 /* Get the current process ID. */
1925 gcmkONERROR(gckOS_GetProcessID(&processID));
1927 /* Query if we need to copy the client data. */
1928 gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
1930 /* Loop while there are records in the queue. */
1931 while (Queue != gcvNULL)
1937 /* Point to stack record. */
1940 /* Copy the data from the client. */
1941 gcmkONERROR(gckOS_CopyFromUserData(Event->os,
1944 gcmSIZEOF(gcsQUEUE)));
1948 gctPOINTER pointer = gcvNULL;
1950 /* Map record into kernel memory. */
1951 gcmkONERROR(gckOS_MapUserPointer(Event->os,
1953 gcmSIZEOF(gcsQUEUE),
1959 /* Append event record to event queue. */
1961 gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
1963 /* Next record in the queue. */
/* Capture the next pointer before the record may be unmapped below. */
1964 next = gcmUINT64_TO_PTR(record->next);
1968 /* Unmap record from kernel memory. */
1970 gckOS_UnmapUserPointer(Event->os,
1972 gcmSIZEOF(gcsQUEUE),
1973 (gctPOINTER *) record));
1980 /* Submit the event list. */
/* NOTE(review): alternative gcdMULTI_GPU branches, not sequential calls. */
1982 gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, ChipEnable));
1984 gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
1989 return gcvSTATUS_OK;
/* Error path: if a user record is still mapped (map mode only), unmap it. */
1992 if ((record != gcvNULL) && !needCopy)
1995 gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
1997 gcmSIZEOF(gcsQUEUE),
1998 (gctPOINTER *) record));
2001 /* Return the status. */
2006 /*******************************************************************************
2010 ** Schedule a composition event and start a composition.
2015 ** Pointer to an gckEVENT object.
2017 ** gcsHAL_COMPOSE_PTR Info
2018 ** Pointer to the composition structure.
/* gckEVENT_Compose: build a one-to-three-record signal list for a
 * composition (main signal plus optional user signals #1 and #2), attach it
 * to a freshly allocated event ID, and kick off the composition in hardware.
 * NOTE(review): elided extraction — the Event parameter, braces and footer
 * are missing; the coid/rcvid assignments are the QNX variant of the signal
 * fields, interleaved with the generic variant (directives dropped). */
2027 IN gcsHAL_COMPOSE_PTR Info
2031 gcsEVENT_PTR headRecord;
2032 gcsEVENT_PTR tailRecord;
2033 gcsEVENT_PTR tempRecord;
2035 gctUINT32 processID;
2037 gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
2039 /* Verify the arguments. */
2040 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
2041 gcmkVERIFY_ARGUMENT(Info != gcvNULL);
2043 /* Allocate an event ID. */
/* NOTE(review): alternative gcdMULTI_GPU branches, not sequential calls. */
2045 gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
2047 gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
2050 /* Get process ID. */
2051 gcmkONERROR(gckOS_GetProcessID(&processID));
2053 /* Allocate a record. */
2054 gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
2055 headRecord = tailRecord = tempRecord;
2057 /* Initialize the record. */
2058 tempRecord->info.command = gcvHAL_SIGNAL;
2059 tempRecord->info.u.Signal.process = Info->process;
2061 tempRecord->info.u.Signal.coid = Info->coid;
2062 tempRecord->info.u.Signal.rcvid = Info->rcvid;
2064 tempRecord->info.u.Signal.signal = Info->signal;
2065 tempRecord->info.u.Signal.auxSignal = 0;
2066 tempRecord->next = gcvNULL;
2067 tempRecord->processID = processID;
2069 /* Allocate another record for user signal #1. */
2070 if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
2072 /* Allocate a record. */
2073 gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord))
2074 tailRecord->next = tempRecord;
2075 tailRecord = tempRecord;
2077 /* Initialize the record. */
2078 tempRecord->info.command = gcvHAL_SIGNAL;
2079 tempRecord->info.u.Signal.process = Info->userProcess;
2081 tempRecord->info.u.Signal.coid = Info->coid;
2082 tempRecord->info.u.Signal.rcvid = Info->rcvid;
2084 tempRecord->info.u.Signal.signal = Info->userSignal1;
2085 tempRecord->info.u.Signal.auxSignal = 0;
2086 tempRecord->next = gcvNULL;
2087 tempRecord->processID = processID;
2090 /* Allocate another record for user signal #2. */
2091 if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
2093 /* Allocate a record. */
2094 gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
2095 tailRecord->next = tempRecord;
2097 /* Initialize the record. */
2098 tempRecord->info.command = gcvHAL_SIGNAL;
2099 tempRecord->info.u.Signal.process = Info->userProcess;
2101 tempRecord->info.u.Signal.coid = Info->coid;
2102 tempRecord->info.u.Signal.rcvid = Info->rcvid;
2104 tempRecord->info.u.Signal.signal = Info->userSignal2;
2105 tempRecord->info.u.Signal.auxSignal = 0;
2106 tempRecord->next = gcvNULL;
2107 tempRecord->processID = processID;
2110 /* Set the event list. */
2111 Event->queues[id].head = headRecord;
2113 /* Start composition. */
2114 gcmkONERROR(gckHARDWARE_Compose(
2115 Event->kernel->hardware, processID,
2116 gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
2121 return gcvSTATUS_OK;
2124 /* Return the status. */
2129 /*******************************************************************************
2131 ** gckEVENT_Interrupt
2133 ** Called by the interrupt service routine to store the triggered interrupt
2134 ** mask to be later processed by gckEVENT_Notify.
2139 ** Pointer to an gckEVENT object.
2142 ** Mask for the 32 interrupts.
/* gckEVENT_Interrupt (body; the signature above this point was elided from
 * the extraction): called from the ISR to record the triggered interrupt
 * bits for later processing by gckEVENT_Notify.
 * NOTE(review): three mutually exclusive pending-update variants follow
 * (atom-based, QNX atomic_set, and plain |=), originally separated by
 * preprocessor directives that were dropped from this view; the same holds
 * for the gcdMULTI_GPU-vs-single-GPU branches inside each variant. */
2162 gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
2164 /* Verify the arguments. */
2165 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
/* Bit 29 appears to be a special "entry queue" interrupt, stripped from
 * Data before the pending bits are stored — TODO confirm semantics. */
2167 if (Data & 0x20000000)
2171 Data &= ~0x20000000;
2174 if (Event->kernel->core == gcvCORE_MAJOR)
2177 /* Get first entry information. */
2179 gckENTRYQUEUE_Dequeue(&Event->kernel->command->queue, &data));
2181 /* Make sure FE is idle. */
/* Busy-poll the idle register until the front end reports fully idle. */
2184 gcmkVERIFY_OK(gckOS_ReadRegisterEx(
2186 Event->kernel->core,
2190 while (idle != 0x7FFFFFFF);
2192 /* Start Command Parser. */
2193 gcmkVERIFY_OK(gckHARDWARE_Execute(
2194 Event->kernel->hardware,
2201 /* Combine current interrupt status with pending flags. */
2204 if (Event->kernel->core == gcvCORE_MAJOR)
2206 gckOS_AtomSetMask(Event->pending3D[CoreId], Data);
2211 gckOS_AtomSetMask(Event->pending, Data);
2213 #elif defined(__QNXNTO__)
2215 if (Event->kernel->core == gcvCORE_MAJOR)
2217 atomic_set(&Event->pending3D[CoreId], Data);
2222 atomic_set(&Event->pending, Data);
2227 if (Event->kernel->core == gcvCORE_MAJOR)
2229 for (i = 0; i < gcdMULTI_GPU; i++)
2231 Event->pending3D[i] |= Data;
2236 if (Event->kernel->core == gcvCORE_MAJOR)
2238 Event->pending3D[CoreId] |= Data;
2244 Event->pending |= Data;
2248 #if gcdINTERRUPT_STATISTIC
/* Bookkeeping: decrement the outstanding-interrupt counter once per event
 * bit present in Data (incremented at submit time). */
2253 for (j = 0; j < gcmCOUNTOF(Event->queues); j++)
2255 if ((Data & (1 << j)))
2257 gcmkVERIFY_OK(gckOS_AtomDecrement(Event->os,
2258 Event->interruptCount,
2267 return gcvSTATUS_OK;
2270 /*******************************************************************************
2274 ** Process all triggered interrupts.
2279 ** Pointer to an gckEVENT object.
/* gckEVENT_Notify (body; signature elided): drain all pending interrupt
 * bits, locate the oldest matching event queue, and run every queued record
 * (free memory, fire signals, unlock video memory, ...). The busy flag keeps
 * only one thread inside this routine at a time.
 * NOTE(review): elided extraction — the outer processing loop's braces and
 * many #if/#elif/#endif directives are missing. The three pending-mask
 * access styles that appear back-to-back throughout (gckOS_Atom*, QNX
 * atomic_clr, plain bitwise with interrupt suspend/resume) are alternative
 * platform branches, not sequential code. */
2291 gceSTATUS status = gcvSTATUS_OK;
2293 gcsEVENT_QUEUE * queue;
2295 gctBOOL acquired = gcvFALSE;
2298 gctUINT pending = 0;
2299 gckKERNEL kernel = Event->kernel;
2301 gceCORE core = Event->kernel->core;
2304 gctUINT pendingMask;
2307 gctBOOL suspended = gcvFALSE;
2309 #if gcmIS_DEBUG(gcdDEBUG_TRACE)
2310 gctINT eventNumber = 0;
2314 gcskSECURE_CACHE_PTR cache;
2316 gckVIDMEM_NODE nodeObject;
2317 gcuVIDMEM_NODE_PTR node;
2319 gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
2321 /* Verify the arguments. */
2322 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
/* Debug trace: dump the stamp/source of every non-empty event queue. */
2327 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
2329 if (Event->queues[i].head != gcvNULL)
2331 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2332 "Queue(%d): stamp=%llu source=%d",
2334 Event->queues[i].stamp,
2335 Event->queues[i].source);
2342 /* Set busy flag. */
2343 gckOS_AtomicExchange(Event->os, &Event->busy, 1, &busy);
2346 /* Another thread is already busy - abort. */
2353 gcsEVENT_PTR record;
2355 gctUINT32 pend[gcdMULTI_GPU];
2356 gctUINT32 pendMask[gcdMULTI_GPU];
2359 /* Grab the mutex queue. */
2360 gcmkONERROR(gckOS_AcquireMutex(Event->os,
2361 Event->eventQueueMutex,
/* Snapshot the pending interrupt bits (platform-specific variants). */
2367 if (core == gcvCORE_MAJOR)
2369 /* Get current interrupts. */
2370 for (i = 0; i < gcdMULTI_GPU; i++)
2372 gckOS_AtomGet(Event->os, Event->pending3D[i], (gctINT32_PTR)&pend[i]);
2373 gckOS_AtomGet(Event->os, Event->pending3DMask[i], (gctINT32_PTR)&pendMask[i]);
2376 gckOS_AtomGet(Event->os, Event->pendingMask, (gctINT32_PTR)&pendingMask);
2381 gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
2384 /* Suspend interrupts. */
2385 gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
2386 suspended = gcvTRUE;
2389 if (core == gcvCORE_MAJOR)
2391 for (i = 0; i < gcdMULTI_GPU; i++)
2393 /* Get current interrupts. */
2394 pend[i] = Event->pending3D[i];
2395 pendMask[i] = Event->pending3DMask[i];
2398 pendingMask = Event->pendingMask;
2403 pending = Event->pending;
2406 /* Resume interrupts. */
2407 gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
2408 suspended = gcvFALSE;
/* Multi-GPU: discard interrupt bits not covered by each core's mask, then
 * combine per-core pending bits into a single word. */
2412 if (core == gcvCORE_MAJOR)
2414 for (i = 0; i < gcdMULTI_GPU; i++)
2416 gctUINT32 bad_pend = (pend[i] & ~pendMask[i]);
2421 gcvLEVEL_ERROR, gcvZONE_EVENT,
2422 gcmSIZEOF(bad_pend) + gcmSIZEOF(i),
/* NOTE(review): runtime string reads "not unexpected" — the double negative
 * contradicts the error level; looks like it should be "not expected".
 * Cannot change in a doc-only pass. */
2423 "Interrupts 0x%x are not unexpected for Core%d.",
2427 gckOS_AtomClearMask(Event->pending3D[i], bad_pend);
2429 pend[i] &= pendMask[i];
2433 pending = (pend[0] & pend[1] & pendingMask) /* Check combined events on both GPUs */
2434 | (pend[0] & ~pendingMask) /* Check individual events on GPU 0 */
2435 | (pend[1] & ~pendingMask); /* Check individual events on GPU 1 */
2441 /* Release the mutex queue. */
2442 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
2443 acquired = gcvFALSE;
2445 /* No more pending interrupts - done. */
/* Bits 31 and 30 are error indicators (AXI bus error, MMU exception), not
 * event IDs; report them and strip them from the pending word. */
2449 if (pending & 0x80000000)
2451 gcmkPRINT("AXI BUS ERROR");
2452 pending &= 0x7FFFFFFF;
2455 if (pending & 0x40000000)
2457 gckHARDWARE_DumpMMUException(Event->kernel->hardware);
2459 pending &= 0xBFFFFFFF;
2463 gcvLEVEL_INFO, gcvZONE_EVENT,
2465 "Pending interrupts 0x%x",
2474 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
2476 if (Event->queues[i].head != gcvNULL)
2478 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2479 "Queue(%d): stamp=%llu source=%d",
2481 Event->queues[i].stamp,
2482 Event->queues[i].source);
2488 /* Find the oldest pending interrupt. */
2489 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
2491 if ((Event->queues[i].head != gcvNULL)
2492 && (pending & (1 << i))
2495 if ((queue == gcvNULL)
2496 || (Event->queues[i].stamp < queue->stamp)
2499 queue = &Event->queues[i];
2501 #if gcmIS_DEBUG(gcdDEBUG_TRACE)
/* No queue matched the pending bits: clear them so we don't spin, using
 * the platform-appropriate variant, then bail out. */
2508 if (queue == gcvNULL)
2511 gcvLEVEL_ERROR, gcvZONE_EVENT,
2513 "Interrupts 0x%x are not pending.",
2519 if (core == gcvCORE_MAJOR)
2521 /* Mark pending interrupts as handled. */
2522 for (i = 0; i < gcdMULTI_GPU; i++)
2524 gckOS_AtomClearMask(Event->pending3D[i], pending);
2525 gckOS_AtomClearMask(Event->pending3DMask[i], pending);
2528 gckOS_AtomClearMask(Event->pendingMask, pending);
2533 gckOS_AtomClearMask(Event->pending, pending);
2536 #elif defined(__QNXNTO__)
2538 if (core == gcvCORE_MAJOR)
2540 for (i = 0; i < gcdMULTI_GPU; i++)
2542 atomic_clr((gctUINT32_PTR)&Event->pending3D[i], pending);
2543 atomic_clr((gctUINT32_PTR)&Event->pending3DMask[i], pending);
2546 atomic_clr((gctUINT32_PTR)&Event->pendingMask, pending);
2551 atomic_clr((gctUINT32_PTR)&Event->pending, pending);
2554 /* Suspend interrupts. */
2555 gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
2556 suspended = gcvTRUE;
2559 if (core == gcvCORE_MAJOR)
2561 for (i = 0; i < gcdMULTI_GPU; i++)
2563 /* Mark pending interrupts as handled. */
2564 Event->pending3D[i] &= ~pending;
2565 Event->pending3DMask[i] &= ~pending;
2571 Event->pending &= ~pending;
2574 /* Resume interrupts. */
2575 gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
2576 suspended = gcvFALSE;
2579 /* Release the mutex queue. */
2580 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
2581 acquired = gcvFALSE;
2585 /* Check whether there is a missed interrupt. */
/* A queue older than the chosen one, with compatible source (and chip
 * enable), means its interrupt was lost — process it first. */
2586 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
2588 if ((Event->queues[i].head != gcvNULL)
2589 && (Event->queues[i].stamp < queue->stamp)
2590 && (Event->queues[i].source <= queue->source)
2592 && (Event->queues[i].chipEnable == queue->chipEnable)
2598 gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
2599 "Event %d lost (stamp %llu)",
2600 i, Event->queues[i].stamp
2603 /* Use this event instead. */
2604 queue = &Event->queues[i];
2611 #if gcmIS_DEBUG(gcdDEBUG_TRACE)
2613 gcvLEVEL_INFO, gcvZONE_EVENT,
2614 gcmSIZEOF(eventNumber),
2615 "Processing interrupt %d",
/* Clear only the chosen event's bit ("mask") — platform variants again. */
2623 if (core == gcvCORE_MAJOR)
2625 for (i = 0; i < gcdMULTI_GPU; i++)
2627 /* Mark pending interrupt as handled. */
2628 gckOS_AtomClearMask(Event->pending3D[i], mask);
2629 gckOS_AtomClearMask(Event->pending3DMask[i], mask);
2632 gckOS_AtomClearMask(Event->pendingMask, mask);
2637 gckOS_AtomClearMask(Event->pending, mask);
2640 #elif defined(__QNXNTO__)
2642 if (core == gcvCORE_MAJOR)
2644 for (i = 0; i < gcdMULTI_GPU; i++)
2646 atomic_clr(&Event->pending3D[i], mask);
2647 atomic_clr(&Event->pending3DMask[i], mask);
2650 atomic_clr(&Event->pendingMask, mask);
2655 atomic_clr(&Event->pending, mask);
2658 /* Suspend interrupts. */
2659 gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
2660 suspended = gcvTRUE;
2663 if (core == gcvCORE_MAJOR)
2665 for (i = 0; i < gcdMULTI_GPU; i++)
2667 /* Mark pending interrupt as handled. */
2668 Event->pending3D[i] &= ~mask;
2669 Event->pending3DMask[i] &= ~mask;
2672 Event->pendingMask &= ~mask;
2677 Event->pending &= ~mask;
2680 /* Resume interrupts. */
2681 gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
2682 suspended = gcvFALSE;
2685 /* Grab the event head. */
2686 record = queue->head;
2688 /* Now quickly clear its event list. */
2689 queue->head = gcvNULL;
2691 /* Release the mutex queue. */
2692 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
2693 acquired = gcvFALSE;
2695 /* Increase the number of free events. */
2696 gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
2698 /* Walk all events for this interrupt. */
2699 while (record != gcvNULL)
2701 gcsEVENT_PTR recordNext;
2709 /* Grab next record. */
2710 recordNext = record->next;
2713 /* Assign record->processID as the pid for this galcore thread.
2714 * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
2716 drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
2720 /* Get the cache that belongs to this process. */
2721 gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
2727 gcvLEVEL_INFO, gcvZONE_EVENT,
2728 gcmSIZEOF(record->info.command),
2729 "Processing event type: %d",
2730 record->info.command
/* Dispatch on the deferred-operation type recorded at queue time. */
2733 switch (record->info.command)
2735 case gcvHAL_FREE_NON_PAGED_MEMORY:
2736 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2737 "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
2738 gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
2740 /* Free non-paged memory. */
2741 status = gckOS_FreeNonPagedMemory(
2743 (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
2744 gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
2745 gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
2747 if (gcmIS_SUCCESS(status))
2750 gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
/* NOTE(review): these two lines use record->record.u.…, while every other
 * case uses record->info.u.… — likely a different-branch field name left in
 * a conditionally compiled (gcdSECURE_USER?) path; verify against the full
 * file. */
2753 gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical),
2754 (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes));
2757 gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
2760 case gcvHAL_FREE_CONTIGUOUS_MEMORY:
2762 gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2763 "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
2764 gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
2766 /* Unmap the user memory. */
2767 status = gckOS_FreeContiguous(
2769 gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
2770 gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
2771 (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
2773 if (gcmIS_SUCCESS(status))
2776 gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
/* NOTE(review): event->event.u.… — a variable name ("event") that does not
 * match the local "record"; almost certainly from an older/alternate branch
 * behind a dropped #if. Verify before touching. */
2779 gcmUINT64_TO_PTR(event->event.u.FreeContiguousMemory.logical),
2780 (gctSIZE_T) event->event.u.FreeContiguousMemory.bytes));
2783 gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
2786 case gcvHAL_WRITE_DATA:
2788 /* Convert physical into logical address. */
2790 gckOS_MapPhysical(Event->os,
2791 record->info.u.WriteData.address,
2792 gcmSIZEOF(gctUINT32),
2797 gckOS_WriteMemory(Event->os,
2799 record->info.u.WriteData.data));
2801 /* Unmap the physical memory. */
2803 gckOS_UnmapPhysical(Event->os,
2805 gcmSIZEOF(gctUINT32)));
2809 gckOS_WriteMemory(Event->os,
2811 record->info.u.WriteData.address,
2812 record->info.u.WriteData.data));
2816 case gcvHAL_UNLOCK_VIDEO_MEMORY:
2817 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2818 "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
2819 record->info.u.UnlockVideoMemory.node);
2821 nodeObject = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
2823 node = nodeObject->node;
2825 /* Save node information before it disappears. */
/* NOTE(review): event->event.u.… again — alternate-branch line, see above. */
2827 node = event->event.u.UnlockVideoMemory.node;
2828 if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
2835 logical = node->Virtual.logical;
2836 bytes = node->Virtual.bytes;
2841 status = gckVIDMEM_Unlock(
2844 record->info.u.UnlockVideoMemory.type,
2848 if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
2850 gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
2858 #if gcdPROCESS_ADDRESS_SPACE
2859 gcmkVERIFY_OK(gckVIDMEM_NODE_Unlock(
2866 status = gckVIDMEM_NODE_Dereference(Event->kernel, nodeObject);
/* gcvHAL_SIGNAL case (case label elided): fire a kernel or user signal;
 * QNX (coid/rcvid) and generic variants are interleaved below. */
2870 signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
2871 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2872 "gcvHAL_SIGNAL: 0x%x",
2876 if ((record->info.u.Signal.coid == 0)
2877 && (record->info.u.Signal.rcvid == 0)
2880 /* Kernel signal. */
2882 gckOS_Signal(Event->os,
2890 gckOS_UserSignal(Event->os,
2892 record->info.u.Signal.rcvid,
2893 record->info.u.Signal.coid));
2897 if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
2899 /* Kernel signal. */
2901 gckOS_Signal(Event->os,
2909 gckOS_UserSignal(Event->os,
2911 gcmUINT64_TO_PTR(record->info.u.Signal.process)));
2914 gcmkASSERT(record->info.u.Signal.auxSignal == 0);
2918 case gcvHAL_UNMAP_USER_MEMORY:
2919 info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
2920 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2921 "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
2924 /* Unmap the user memory. */
2925 status = gckOS_UnmapUserMemory(
2927 Event->kernel->core,
2928 gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
2929 (gctSIZE_T) record->info.u.UnmapUserMemory.size,
2931 record->info.u.UnmapUserMemory.address);
2934 if (gcmIS_SUCCESS(status))
2936 gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
2939 gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
2940 (gctSIZE_T) record->info.u.UnmapUserMemory.size));
2943 gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
2946 case gcvHAL_TIMESTAMP:
2947 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
2948 "gcvHAL_TIMESTAMP: %d %d",
2949 record->info.u.TimeStamp.timer,
2950 record->info.u.TimeStamp.request);
2952 /* Process the timestamp. */
2953 switch (record->info.u.TimeStamp.request)
2956 status = gckOS_GetTime(&Event->kernel->timers[
2957 record->info.u.TimeStamp.timer].
2962 status = gckOS_GetTime(&Event->kernel->timers[
2963 record->info.u.TimeStamp.timer].
2969 gcvLEVEL_ERROR, gcvZONE_EVENT,
2970 gcmSIZEOF(record->info.u.TimeStamp.request),
2971 "Invalid timestamp request: %d",
2972 record->info.u.TimeStamp.request
2975 status = gcvSTATUS_INVALID_ARGUMENT;
2980 case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
2982 gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
2983 (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
2984 gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
2985 gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
2987 gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
2990 #if gcdANDROID_NATIVE_FENCE_SYNC
2991 case gcvHAL_SYNC_POINT:
2993 gctSYNC_POINT syncPoint;
2995 syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
2996 status = gckOS_SignalSyncPoint(Event->os, syncPoint);
3001 #if gcdPROCESS_ADDRESS_SPACE
3002 case gcvHAL_DESTROY_MMU:
3003 status = gckMMU_Destroy(gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
3007 case gcvHAL_COMMIT_DONE:
3011 /* Invalid argument. */
3013 gcvLEVEL_ERROR, gcvZONE_EVENT,
3014 gcmSIZEOF(record->info.command),
3015 "Unknown event type: %d",
3016 record->info.command
3019 status = gcvSTATUS_INVALID_ARGUMENT;
3023 /* Make sure there are no errors generated. */
3024 if (gcmIS_ERROR(status))
3027 gcvLEVEL_WARNING, gcvZONE_EVENT,
3029 "Event produced status: %d(%s)",
3030 status, gckOS_DebugStatus2Name(status));
3033 /* Free the event. */
3034 gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
3036 /* Advance to next record. */
3037 record = recordNext;
3040 gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
3041 "Handled interrupt 0x%x", mask);
3045 /* Clear busy flag. */
3046 gckOS_AtomicExchange(Event->os, &Event->busy, 0, &oldValue);
3051 gcmkONERROR(_TryToIdleGPU(Event));
3059 return gcvSTATUS_OK;
/* Error path (OnError label elided): release mutex / resume interrupts if
 * the acquired/suspended flags show they are still held. */
3064 /* Release mutex. */
3065 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
3071 /* Resume interrupts. */
3072 gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
3076 /* Return the status. */
3081 /*******************************************************************************
3082 ** gckEVENT_FreeProcess
3084 ** Free all events owned by a particular process ID.
3089 ** Pointer to an gckEVENT object.
3091 ** gctUINT32 ProcessID
3092 ** Process ID of the process to be freed up.
/* gckEVENT_FreeProcess: detach every queued event record owned by ProcessID
 * from all event queues (rebuilding each queue without them, order
 * preserved), then free the detached records outside the mutex.
 * NOTE(review): elided extraction — the Event parameter, status declaration,
 * several braces and the footer are missing between the numbered lines. */
3099 gckEVENT_FreeProcess(
3101 IN gctUINT32 ProcessID
3105 gctBOOL acquired = gcvFALSE;
3106 gcsEVENT_PTR record, next;
3108 gcsEVENT_PTR deleteHead, deleteTail;
3110 gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
3112 /* Verify the arguments. */
3113 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
3115 /* Walk through all queues. */
3116 for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
3118 if (Event->queues[i].head != gcvNULL)
3120 /* Grab the event queue mutex. */
3121 gcmkONERROR(gckOS_AcquireMutex(Event->os,
3122 Event->eventQueueMutex,
3126 /* Grab the mutex head. */
/* Detach the whole list, then re-append the records we keep. */
3127 record = Event->queues[i].head;
3128 Event->queues[i].head = gcvNULL;
3129 Event->queues[i].tail = gcvNULL;
3130 deleteHead = gcvNULL;
3131 deleteTail = gcvNULL;
3133 while (record != gcvNULL)
3135 next = record->next;
3136 if (record->processID == ProcessID)
3138 if (deleteHead == gcvNULL)
3140 deleteHead = record;
3144 deleteTail->next = record;
3147 deleteTail = record;
3151 if (Event->queues[i].head == gcvNULL)
3153 Event->queues[i].head = record;
3157 Event->queues[i].tail->next = record;
3160 Event->queues[i].tail = record;
3163 record->next = gcvNULL;
3167 /* Release the mutex queue. */
3168 gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
3169 acquired = gcvFALSE;
3171 /* Loop through the entire list of events. */
3172 for (record = deleteHead; record != gcvNULL; record = next)
3174 /* Get the next event record. */
3175 next = record->next;
3177 /* Free the event record. */
3178 gcmkONERROR(gckEVENT_FreeRecord(Event, record));
3183 gcmkONERROR(_TryToIdleGPU(Event));
3187 return gcvSTATUS_OK;
3190 /* Release the event queue mutex. */
3193 gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
3196 /* Return the status. */
3201 /*******************************************************************************
3204 ** Stop the hardware using the End event mechanism.
3209 ** Pointer to an gckEVENT object.
3211 ** gctUINT32 ProcessID
3212 ** Process ID Logical belongs.
3214 ** gctPHYS_ADDR Handle
3215 ** Physical address handle. If gcvNULL it is video memory.
3217 ** gctPOINTER Logical
3218 ** Logical address to flush.
3221 ** Pointer to the signal to trigger.
/* gckEVENT_Stop (signature start elided): stop the hardware by replacing the
 * command queue's last WAIT with an END, attach a signal record to a fresh
 * event ID, and block until that signal fires — i.e. until the GPU has
 * consumed everything up to the END. */
3230 IN gctUINT32 ProcessID,
3231 IN gctPHYS_ADDR Handle,
3232 IN gctPOINTER Logical,
3233 IN gctSIGNAL Signal,
/* In/out: size of the WAIT being replaced / bytes written for the END. */
3234 IN OUT gctUINT32 * waitSize
3238 /* gctSIZE_T waitSize;*/
3239 gcsEVENT_PTR record;
3242 gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
3244 Event, ProcessID, Handle, Logical, Signal);
3246 /* Verify the arguments. */
3247 gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
3249 /* Submit the current event queue. */
/* NOTE(review): each of the next two pairs is a gcdMULTI_GPU #if/#else pair
 * with the directives dropped — only one call of each pair is compiled. */
3251 gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
3253 gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
3256 gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
3258 gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
3261 /* Allocate a record. */
3262 gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
3264 /* Initialize the record. */
3265 record->next = gcvNULL;
3266 record->processID = ProcessID;
3267 record->info.command = gcvHAL_SIGNAL;
3268 record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
/* coid/rcvid zeroed: marks this as a kernel-side signal (QNX fields). */
3270 record->info.u.Signal.coid = 0;
3271 record->info.u.Signal.rcvid = 0;
3273 record->info.u.Signal.auxSignal = 0;
3274 record->info.u.Signal.process = 0;
3276 /* Append the record. */
3277 Event->queues[id].head = record;
3279 /* Replace last WAIT with END. */
3280 gcmkONERROR(gckHARDWARE_End(
3281 Event->kernel->hardware, Logical, waitSize
3284 #if gcdNONPAGED_MEMORY_CACHEABLE
3285 /* Flush the cache for the END. */
3286 gcmkONERROR(gckOS_CacheClean(
3296 /* Wait for the signal. */
3297 gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
3301 return gcvSTATUS_OK;
3305 /* Return the status. */
/* Body of the record-printing helper (presumably _PrintRecord, judging by
 * the call sites in the dump routine below — its signature was elided from
 * this extraction): print a one-line human-readable description of a queued
 * event record, keyed on its command code. break statements and some case
 * labels were dropped by the extraction. */
3315 switch (record->info.command)
3317 case gcvHAL_FREE_NON_PAGED_MEMORY:
3318 gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
3321 case gcvHAL_FREE_CONTIGUOUS_MEMORY:
3322 gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
3325 case gcvHAL_WRITE_DATA:
3326 gcmkPRINT(" gcvHAL_WRITE_DATA");
3329 case gcvHAL_UNLOCK_VIDEO_MEMORY:
3330 gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
3334 gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
3335 record->info.u.Signal.process,
3336 record->info.u.Signal.signal);
3339 case gcvHAL_UNMAP_USER_MEMORY:
3340 gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
3343 case gcvHAL_TIMESTAMP:
3344 gcmkPRINT(" gcvHAL_TIMESTAMP");
3347 case gcvHAL_COMMIT_DONE:
3348 gcmkPRINT(" gcvHAL_COMMIT_DONE");
3351 case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
3352 gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
3353 record->info.u.FreeVirtualCommandBuffer.logical);
3356 case gcvHAL_SYNC_POINT:
3357 gcmkPRINT(" gcvHAL_SYNC_POINT syncPoint=0x%08x",
3358 gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint));
3362 case gcvHAL_DESTROY_MMU:
/* NOTE(review): runtime string says "DESTORY" (typo for DESTROY) — cannot
 * change in a doc-only pass; fix alongside a logic change if ever made. */
3363 gcmkPRINT(" gcvHAL_DESTORY_MMU mmu=0x%08x",
3364 gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
3368 gcmkPRINT(" Illegal Event %d", record->info.command);
3373 /*******************************************************************************
3376 ** Dump record in event queue when stuck happens.
3377 ** No protection for the event queue.
/* Event state dump routine (header/signature elided): used when the GPU is
 * stuck; walks the not-yet-submitted queue list and all per-ID queues,
 * printing each record via the helper above. The per-comment note at the
 * function's doc header says it runs without taking the queue mutex.
 * NOTE(review): elided extraction — the loops' while/braces are partially
 * missing between the numbered lines. */
3384 gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
3385 gcsEVENT_QUEUE_PTR queue;
3386 gcsEVENT_PTR record = gcvNULL;
3388 #if gcdINTERRUPT_STATISTIC
3389 gctINT32 pendingInterrupt;
3390 gctUINT32 intrAcknowledge;
3393 gcmkHEADER_ARG("Event=0x%x", Event);
3395 gcmkPRINT("**************************\n");
3396 gcmkPRINT("*** EVENT STATE DUMP ***\n");
3397 gcmkPRINT("**************************\n");
/* NOTE(review): "Unsumbitted" is a typo for "Unsubmitted" in a runtime
 * string — cannot change in a doc-only pass. */
3399 gcmkPRINT(" Unsumbitted Event:");
/* Pass 1: queues accumulated but not yet handed to the hardware. */
3403 record = queueHead->head;
3405 gcmkPRINT(" [%x]:", queue);
3408 _PrintRecord(record);
3409 record = record->next;
3412 if (queueHead == Event->queueTail)
3414 queueHead = gcvNULL;
3418 queueHead = queueHead->next;
/* Pass 2: events submitted to hardware whose interrupts never arrived. */
3422 gcmkPRINT(" Untriggered Event:");
3423 for (i = 0; i < gcmCOUNTOF(Event->queues); i++)
3425 queue = &Event->queues[i];
3426 record = queue->head;
3428 gcmkPRINT(" [%d]:", i);
3431 _PrintRecord(record);
3432 record = record->next;
3436 #if gcdINTERRUPT_STATISTIC
3437 gckOS_AtomGet(Event->os, Event->interruptCount, &pendingInterrupt);
3438 gcmkPRINT(" Number of Pending Interrupt: %d", pendingInterrupt);
/* Only read the interrupt-acknowledge register when no recovery is in
 * flight (the register read could interfere otherwise — hedge: inferred
 * from the recovery guard, confirm against the full file). */
3440 if (Event->kernel->recovery == 0)
3442 gckOS_ReadRegisterEx(
3444 Event->kernel->core,
3449 gcmkPRINT(" INTR_ACKNOWLEDGE=0x%x", intrAcknowledge);
3454 return gcvSTATUS_OK;