filename | src/eventq.c |
changeset | 265:5daf59b7f31b |
next | 422:61a0598e07ff |
author | nkeynes |
date | Sat Jan 06 04:06:36 2007 +0000 (16 years ago) |
permissions | -rw-r--r-- |
last change | Implement event queue. Fix pvr2 timing (yes, again). |
view | annotate | diff | log | raw |
1 /**
2 * $Id: eventq.c,v 1.1 2007-01-06 04:06:36 nkeynes Exp $
3 *
4 * Simple implementation of one-shot timers. Effectively this allows IO
5 * devices to wait until a particular time before completing. We expect
6 * there to be at least half a dozen or so continually scheduled events
7 * (TMU and PVR2), peaking around 20+.
8 *
9 * Copyright (c) 2005 Nathan Keynes.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
22 #include <assert.h>
23 #include "dreamcast.h"
24 #include "eventq.h"
25 #include "sh4core.h"
27 #define LONG_SCAN_PERIOD 1000000000 /* 1 second */
/**
 * A single one-shot timer entry. Events live in the statically-allocated
 * events[] table (indexed by event id) and are linked into either the
 * short queue (due within 1 second) or the long queue via next.
 */
typedef struct event {
    uint32_t id;        /* Event identifier, == index into events[] */
    uint32_t seconds;   /* Whole seconds until expiry; 0 => short queue */
    uint32_t nanosecs;  /* Nanosecond component; NOT_SCHEDULED when inactive */
    event_func_t func;  /* Callback invoked when the event fires */

    struct event *next; /* Next entry in the (short or long) queue */
} *event_t;
/* Table of all events, indexed by event id */
static struct event events[MAX_EVENT_ID];

/**
 * Countdown to the next scan of the long-duration list (greater than 1 second).
 */
static int long_scan_time_remaining;

/* Head of the short queue: events due within 1 second, sorted by nanosecs */
static event_t event_head;
/* Head of the long queue: events >= 1 second away, unsorted */
static event_t long_event_head;
void event_reset();
void event_init();
uint32_t event_run_slice( uint32_t nanosecs );
void event_save_state( FILE *f );
int event_load_state( FILE * f );

/* Module descriptor: hooks this file's init/reset/run/save/load functions
 * into the dreamcast core's module framework */
struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
                                          NULL, event_save_state, event_load_state };
57 static void event_update_pending( )
58 {
59 if( event_head == NULL ) {
60 if( !(sh4r.event_types & PENDING_IRQ) ) {
61 sh4r.event_pending = NOT_SCHEDULED;
62 }
63 sh4r.event_types &= (~PENDING_EVENT);
64 } else {
65 if( !(sh4r.event_types & PENDING_IRQ) ) {
66 sh4r.event_pending = event_head->nanosecs;
67 }
68 sh4r.event_types |= PENDING_EVENT;
69 }
70 }
72 uint32_t event_get_next_time( )
73 {
74 if( event_head == NULL ) {
75 return NOT_SCHEDULED;
76 } else {
77 return event_head->nanosecs;
78 }
79 }
81 /**
82 * Add the event to the short queue.
83 */
84 static void event_enqueue( event_t event )
85 {
86 if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
87 event->next = event_head;
88 event_head = event;
89 event_update_pending();
90 } else {
91 event_t cur = event_head;
92 event_t next = cur->next;
93 while( next != NULL && event->nanosecs >= next->nanosecs ) {
94 cur = next;
95 next = cur->next;
96 }
97 event->next = next;
98 cur->next = event;
99 }
100 }
102 static void event_dequeue( event_t event )
103 {
104 if( event_head == NULL ) {
105 ERROR( "Empty event queue but should contain event %d", event->id );
106 } else if( event_head == event ) {
107 /* removing queue head */
108 event_head = event_head->next;
109 event_update_pending();
110 } else {
111 event_t cur = event_head;
112 event_t next = cur->next;
113 while( next != NULL ) {
114 if( next == event ) {
115 cur->next = next->next;
116 break;
117 }
118 cur = next;
119 next = cur->next;
120 }
121 }
122 }
124 static void event_dequeue_long( event_t event )
125 {
126 if( long_event_head == NULL ) {
127 ERROR( "Empty long event queue but should contain event %d", event->id );
128 } else if( long_event_head == event ) {
129 /* removing queue head */
130 long_event_head = long_event_head->next;
131 } else {
132 event_t cur = long_event_head;
133 event_t next = cur->next;
134 while( next != NULL ) {
135 if( next == event ) {
136 cur->next = next->next;
137 break;
138 }
139 cur = next;
140 next = cur->next;
141 }
142 }
143 }
145 void register_event_callback( int eventid, event_func_t func )
146 {
147 events[eventid].func = func;
148 }
150 void event_schedule( int eventid, uint32_t nanosecs )
151 {
152 int i;
154 nanosecs += sh4r.slice_cycle;
156 event_t event = &events[eventid];
158 if( event->nanosecs != NOT_SCHEDULED ) {
159 /* Event is already scheduled. Remove it from the list first */
160 event_cancel(eventid);
161 }
163 event->id = eventid;
164 event->seconds = 0;
165 event->nanosecs = nanosecs;
167 event_enqueue( event );
168 }
170 void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
171 if( seconds == 0 ) {
172 event_schedule( eventid, nanosecs );
173 } else {
174 event_t event = &events[eventid];
176 if( event->nanosecs != NOT_SCHEDULED ) {
177 /* Event is already scheduled. Remove it from the list first */
178 event_cancel(eventid);
179 }
181 event->id = eventid;
182 event->seconds = seconds;
183 event->nanosecs = nanosecs;
184 event->next = long_event_head;
185 long_event_head = event;
186 }
188 }
190 void event_cancel( int eventid )
191 {
192 event_t event = &events[eventid];
193 if( event->nanosecs == NOT_SCHEDULED ) {
194 return; /* not scheduled */
195 } else {
196 event->nanosecs = NOT_SCHEDULED;
197 if( event->seconds != 0 ) { /* long term event */
198 event_dequeue_long( event );
199 } else {
200 event_dequeue( event );
201 }
202 }
203 }
/**
 * Fire every short-queue event whose time has arrived (nanosecs <=
 * sh4r.slice_cycle), invoking each event's callback in queue order, then
 * refresh the pending state.
 */
void event_execute()
{
    /* Loop in case we missed some or got a couple scheduled for the same time */
    while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
        event_t event = event_head;
        event_head = event->next;
        event->nanosecs = NOT_SCHEDULED;
        // Note: Make sure the internal state is consistent before calling the
        // user function, as it will (quite likely) enqueue another event.
        event->func( event->id );
    }

    event_update_pending();
}
/**
 * Default callback for the ASIC event id range: forwards the event id
 * straight to the ASIC event handler.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
226 void event_init()
227 {
228 int i;
229 for( i=0; i<MAX_EVENT_ID; i++ ) {
230 events[i].id = i;
231 events[i].nanosecs = NOT_SCHEDULED;
232 if( i < 96 ) {
233 events[i].func = event_asic_callback;
234 } else {
235 events[i].func = NULL;
236 }
237 events[i].next = NULL;
238 }
239 event_head = NULL;
240 long_event_head = NULL;
241 long_scan_time_remaining = LONG_SCAN_PERIOD;
242 }
246 void event_reset()
247 {
248 int i;
249 event_head = NULL;
250 long_event_head = NULL;
251 long_scan_time_remaining = LONG_SCAN_PERIOD;
252 for( i=0; i<MAX_EVENT_ID; i++ ) {
253 events[i].nanosecs = NOT_SCHEDULED;
254 }
255 }
/**
 * Serialize the event-queue state to f. List linkage is stored as event
 * ids (-1 for NULL) so the pointers can be rebuilt by event_load_state.
 */
void event_save_state( FILE *f )
{
    int id, i;
    id = event_head == NULL ? -1 : event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    id = long_event_head == NULL ? -1 : long_event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
    for( i=0; i<MAX_EVENT_ID; i++ ) {
        /* NOTE(review): assumes id/seconds/nanosecs are the first three
         * contiguous uint32_t members of struct event - keep layouts in sync */
        fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
        id = events[i].next == NULL ? -1 : events[i].next->id;
        fwrite( &id, sizeof(id), 1, f );
    }
}
272 int event_load_state( FILE *f )
273 {
274 int id, i;
275 fread( &id, sizeof(id), 1, f );
276 event_head = id == -1 ? NULL : &events[id];
277 fread( &id, sizeof(id), 1, f );
278 long_event_head = id == -1 ? NULL : &events[id];
279 fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
280 for( i=0; i<MAX_EVENT_ID; i++ ) {
281 fread( &events[i].id, sizeof(uint32_t), 3, f );
282 fread( &id, sizeof(id), 1, f );
283 events[i].next = id == -1 ? NULL : &events[id];
284 }
285 return 0;
286 }
/**
 * Scan all entries in the long queue, decrementing each by 1 second. Entries
 * that are now < 1 second are moved to the short queue.
 */
static void event_scan_long()
{
    /* Pop expired heads first. Note the pre-decrement in the condition:
     * the head's seconds count is reduced even when it stays queued. */
    while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
        event_t event = long_event_head;
        long_event_head = event->next;
        event_enqueue(event);
    }

    /* Head (if any) now has seconds > 0; decrement the remainder of the
     * list, unlinking each entry that reaches zero and moving it to the
     * short queue. */
    if( long_event_head != NULL ) {
        event_t last = long_event_head;
        event_t cur = last->next;
        while( cur != NULL ) {
            if( --cur->seconds == 0 ) {
                last->next = cur->next;
                event_enqueue(cur);
            } else {
                last = cur;
            }
            cur = last->next;
        }
    }
}
315 /**
316 * Decrement the event time on all pending events by the supplied nanoseconds.
317 * It may or may not be faster to wrap around instead, but this has the benefit
318 * of simplicity.
319 */
320 uint32_t event_run_slice( uint32_t nanosecs )
321 {
322 event_t event = event_head;
323 while( event != NULL ) {
324 if( event->nanosecs <= nanosecs ) {
325 event->nanosecs = 0;
326 } else {
327 event->nanosecs -= nanosecs;
328 }
329 event = event->next;
330 }
332 long_scan_time_remaining -= nanosecs;
333 if( long_scan_time_remaining <= 0 ) {
334 long_scan_time_remaining += LONG_SCAN_PERIOD;
335 event_scan_long();
336 }
338 event_update_pending();
339 return nanosecs;
340 }
.