drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

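/* Return the current hardware time, in nanoseconds, via the chip-specific
 * ->read() implementation.
 */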
u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
        return tmr->func->read(tmr);
}

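/* Execute the callbacks of all alarms whose timestamp has passed, and
 * reprogram the hardware alarm for the earliest entry still pending.
 * Called from nvkm_timer_alarm() below and from the chip-specific alarm
 * interrupt handlers.
 */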
void
nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
{
        struct nvkm_alarm *alarm, *atemp;
        unsigned long flags;
        LIST_HEAD(exec);

        /* Process pending alarms. */
        spin_lock_irqsave(&tmr->lock, flags);
        list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
                /* Have we hit the earliest alarm that hasn't gone off? */
                if (alarm->timestamp > nvkm_timer_read(tmr)) {
                        /* Schedule it.  If we didn't race, we're done. */
                        tmr->func->alarm_init(tmr, alarm->timestamp);
                        if (alarm->timestamp > nvkm_timer_read(tmr))
                                break;
                }

                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
                list_move_tail(&alarm->head, &exec);
        }

        /* Shut down interrupt if no more pending alarms. */
        if (list_empty(&tmr->alarms))
                tmr->func->alarm_fini(tmr);
        spin_unlock_irqrestore(&tmr->lock, flags);

        /* Execute completed callbacks. */
        list_for_each_entry_safe(alarm, atemp, &exec, head) {
                list_del_init(&alarm->head);
                alarm->func(alarm);
        }
}

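/* (Re)schedule @alarm to fire @nsec nanoseconds from now.  Any pending
 * instance of the same alarm is removed first, so an alarm callback may
 * call this on itself to reschedule; a @nsec of zero only removes the
 * alarm from the pending list.
 */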
void
nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
{
        struct nvkm_alarm *list;
        unsigned long flags;

        /* Remove alarm from pending list.
         *
         * This both protects against the corruption of the list,
         * and implements alarm rescheduling/cancellation.
         */
        spin_lock_irqsave(&tmr->lock, flags);
        list_del_init(&alarm->head);

        if (nsec) {
                /* Insert into pending list, ordered earliest to latest. */
                alarm->timestamp = nvkm_timer_read(tmr) + nsec;
                list_for_each_entry(list, &tmr->alarms, head) {
                        if (list->timestamp > alarm->timestamp)
                                break;
                }
                list_add_tail(&alarm->head, &list->head);
        }
        spin_unlock_irqrestore(&tmr->lock, flags);

        /* Process pending alarms. */
        nvkm_timer_alarm_trigger(tmr);
}

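/* Illustrative usage sketch (not part of this file; the "example" names
 * are hypothetical, while struct nvkm_alarm, nvkm_alarm_init() and
 * nvkm_timer_alarm() are the real timer subdev interfaces): a caller
 * embeds an nvkm_alarm, binds its callback once, and the callback can
 * reschedule itself because nvkm_timer_alarm() unlinks any pending
 * instance before re-inserting it.
 *
 *	struct example {
 *		struct nvkm_timer *tmr;
 *		struct nvkm_alarm alarm;
 *	};
 *
 *	static void
 *	example_alarm(struct nvkm_alarm *alarm)
 *	{
 *		struct example *ex =
 *			container_of(alarm, struct example, alarm);
 *		// ...periodic work, then re-arm for ~1 second...
 *		nvkm_timer_alarm(ex->tmr, 1000000000, &ex->alarm);
 *	}
 *
 *	// one-time setup, e.g. while initialising the user:
 *	nvkm_alarm_init(&ex->alarm, example_alarm);
 *	nvkm_timer_alarm(ex->tmr, 1000000000, &ex->alarm);
 */
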
void
nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
{
        unsigned long flags;
        spin_lock_irqsave(&tmr->lock, flags);
        list_del_init(&alarm->head);
        spin_unlock_irqrestore(&tmr->lock, flags);
}

static void
nvkm_timer_intr(struct nvkm_subdev *subdev)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        tmr->func->intr(tmr);
}

static int
nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        tmr->func->alarm_fini(tmr);
        return 0;
}

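/* Subdev init hook: hand the current monotonic system time (in
 * nanoseconds) to the chip-specific ->time() hook and run the alarm
 * list in case anything became due while the timer was shut down.
 */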
static int
nvkm_timer_init(struct nvkm_subdev *subdev)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        if (tmr->func->init)
                tmr->func->init(tmr);
        tmr->func->time(tmr, ktime_to_ns(ktime_get()));
        nvkm_timer_alarm_trigger(tmr);
        return 0;
}

static void *
nvkm_timer_dtor(struct nvkm_subdev *subdev)
{
        return nvkm_timer(subdev);
}

static const struct nvkm_subdev_func
nvkm_timer = {
        .dtor = nvkm_timer_dtor,
        .init = nvkm_timer_init,
        .fini = nvkm_timer_fini,
        .intr = nvkm_timer_intr,
};

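/* Common constructor used by the chip-specific timer implementations:
 * allocates the nvkm_timer, registers it as a subdev, and records the
 * hardware-specific function table and alarm bookkeeping.
 */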
int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
                int index, struct nvkm_timer **ptmr)
{
        struct nvkm_timer *tmr;

        if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
        tmr->func = func;
        INIT_LIST_HEAD(&tmr->alarms);
        spin_lock_init(&tmr->lock);
        return 0;
}