filename | src/eventq.c |
changeset | 736:a02d1475ccfd |
prev | 730:a0f02e769c2e |
next | 1065:bc1cc0c54917 |
author | nkeynes |
date | Mon Jul 28 00:27:32 2008 +0000 (15 years ago) |
permissions | -rw-r--r-- |
last change | Add -headerpad_max_install_names linker flag for OS/X builds - ensures bundle build doesn't run out of space for name changes |
view | annotate | diff | log | raw |
1 /**
2 * $Id$
3 *
4 * Simple implementation of one-shot timers. Effectively this allows IO
5 * devices to wait until a particular time before completing. We expect
6 * there to be at least half a dozen or so continually scheduled events
7 * (TMU and PVR2), peaking around 20+.
8 *
9 * Copyright (c) 2005 Nathan Keynes.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
22 #include <assert.h>
23 #include "dream.h"
24 #include "dreamcast.h"
25 #include "eventq.h"
26 #include "asic.h"
27 #include "sh4/sh4.h"
29 #define LONG_SCAN_PERIOD 1000000000 /* 1 second */
/**
 * A single one-shot timer entry. All events live in the statically
 * allocated events[] table (indexed by id), and an active event is linked
 * into either the short queue (event_head) or the long queue
 * (long_event_head).
 */
typedef struct event {
    uint32_t id;        /* Event identifier == index into events[] */
    uint32_t seconds;   /* Whole seconds remaining; 0 means the event belongs on the short queue */
    uint32_t nanosecs;  /* Due time in nanoseconds (slice-relative for short events, see
                         * event_schedule); NOT_SCHEDULED when the event is inactive */
    event_func_t func;  /* Callback invoked when the event fires */

    struct event *next; /* Next event in whichever queue this event is linked into */
} *event_t;
/* Static table of all events, indexed by event id */
static struct event events[MAX_EVENT_ID];

/**
 * Countdown to the next scan of the long-duration list (greater than 1 second).
 */
static int long_scan_time_remaining;

static event_t event_head;      /* Short queue: due within ~1s, sorted by ascending nanosecs */
static event_t long_event_head; /* Long queue: more than 1s away, unsorted */

void event_reset();
void event_init();
uint32_t event_run_slice( uint32_t nanosecs );
void event_save_state( FILE *f );
int event_load_state( FILE * f );

/* Module descriptor hooking the event queue into the dreamcast module framework */
struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
        NULL, event_save_state, event_load_state };
59 static void event_update_pending( )
60 {
61 if( event_head == NULL ) {
62 if( !(sh4r.event_types & PENDING_IRQ) ) {
63 sh4r.event_pending = NOT_SCHEDULED;
64 }
65 sh4r.event_types &= (~PENDING_EVENT);
66 } else {
67 if( !(sh4r.event_types & PENDING_IRQ) ) {
68 sh4r.event_pending = event_head->nanosecs;
69 }
70 sh4r.event_types |= PENDING_EVENT;
71 }
72 }
74 uint32_t event_get_next_time( )
75 {
76 if( event_head == NULL ) {
77 return NOT_SCHEDULED;
78 } else {
79 return event_head->nanosecs;
80 }
81 }
83 /**
84 * Add the event to the short queue.
85 */
86 static void event_enqueue( event_t event )
87 {
88 if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
89 event->next = event_head;
90 event_head = event;
91 event_update_pending();
92 } else {
93 event_t cur = event_head;
94 event_t next = cur->next;
95 while( next != NULL && event->nanosecs >= next->nanosecs ) {
96 cur = next;
97 next = cur->next;
98 }
99 event->next = next;
100 cur->next = event;
101 }
102 }
104 static void event_dequeue( event_t event )
105 {
106 if( event_head == NULL ) {
107 ERROR( "Empty event queue but should contain event %d", event->id );
108 } else if( event_head == event ) {
109 /* removing queue head */
110 event_head = event_head->next;
111 event_update_pending();
112 } else {
113 event_t cur = event_head;
114 event_t next = cur->next;
115 while( next != NULL ) {
116 if( next == event ) {
117 cur->next = next->next;
118 break;
119 }
120 cur = next;
121 next = cur->next;
122 }
123 }
124 }
126 static void event_dequeue_long( event_t event )
127 {
128 if( long_event_head == NULL ) {
129 ERROR( "Empty long event queue but should contain event %d", event->id );
130 } else if( long_event_head == event ) {
131 /* removing queue head */
132 long_event_head = long_event_head->next;
133 } else {
134 event_t cur = long_event_head;
135 event_t next = cur->next;
136 while( next != NULL ) {
137 if( next == event ) {
138 cur->next = next->next;
139 break;
140 }
141 cur = next;
142 next = cur->next;
143 }
144 }
145 }
147 void register_event_callback( int eventid, event_func_t func )
148 {
149 events[eventid].func = func;
150 }
152 void event_schedule( int eventid, uint32_t nanosecs )
153 {
154 nanosecs += sh4r.slice_cycle;
156 event_t event = &events[eventid];
158 if( event->nanosecs != NOT_SCHEDULED ) {
159 /* Event is already scheduled. Remove it from the list first */
160 event_cancel(eventid);
161 }
163 event->id = eventid;
164 event->seconds = 0;
165 event->nanosecs = nanosecs;
167 event_enqueue( event );
168 }
170 void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
171 if( seconds == 0 ) {
172 event_schedule( eventid, nanosecs );
173 } else {
174 event_t event = &events[eventid];
176 if( event->nanosecs != NOT_SCHEDULED ) {
177 /* Event is already scheduled. Remove it from the list first */
178 event_cancel(eventid);
179 }
181 event->id = eventid;
182 event->seconds = seconds;
183 event->nanosecs = nanosecs;
184 event->next = long_event_head;
185 long_event_head = event;
186 }
188 }
190 void event_cancel( int eventid )
191 {
192 event_t event = &events[eventid];
193 if( event->nanosecs == NOT_SCHEDULED ) {
194 return; /* not scheduled */
195 } else {
196 event->nanosecs = NOT_SCHEDULED;
197 if( event->seconds != 0 ) { /* long term event */
198 event_dequeue_long( event );
199 } else {
200 event_dequeue( event );
201 }
202 }
203 }
/**
 * Fire every short-queue event whose due time has been reached
 * (nanosecs <= sh4r.slice_cycle), invoking each event's callback, then
 * refresh the SH4 pending-event state.
 * NOTE(review): assumes every due event has a non-NULL func - ids >= 96
 * start with NULL callbacks after event_init(); confirm callers always
 * register before scheduling.
 */
void event_execute()
{
    /* Loop in case we missed some or got a couple scheduled for the same time */
    while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
        event_t event = event_head;
        event_head = event->next;
        event->nanosecs = NOT_SCHEDULED;
        // Note: Make sure the internal state is consistent before calling the
        // user function, as it will (quite likely) enqueue another event.
        event->func( event->id );
    }

    event_update_pending();
}
/**
 * Default callback for the low-numbered event ids: forward the event id
 * directly to the ASIC module.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
/**
 * One-time initialization: mark every event unscheduled, install default
 * callbacks, and empty both queues.
 */
void event_init()
{
    int i;
    for( i=0; i<MAX_EVENT_ID; i++ ) {
        events[i].id = i;
        events[i].nanosecs = NOT_SCHEDULED;
        /* Ids below 96 default to the ASIC forwarder - presumably this is
         * the ASIC event id range; confirm against asic.h */
        if( i < 96 ) {
            events[i].func = event_asic_callback;
        } else {
            events[i].func = NULL; /* must be set via register_event_callback() */
        }
        events[i].next = NULL;
    }
    event_head = NULL;
    long_event_head = NULL;
    long_scan_time_remaining = LONG_SCAN_PERIOD;
}
246 void event_reset()
247 {
248 int i;
249 event_head = NULL;
250 long_event_head = NULL;
251 long_scan_time_remaining = LONG_SCAN_PERIOD;
252 for( i=0; i<MAX_EVENT_ID; i++ ) {
253 events[i].nanosecs = NOT_SCHEDULED;
254 }
255 }
/**
 * Serialize the event queue state to f. Queue links are written as event
 * ids (-1 for NULL) rather than pointers so the saved state is
 * relocatable across runs.
 * NOTE(review): relies on id/seconds/nanosecs being the first three
 * uint32_t members of struct event (written as one 3-word record), and
 * fwrite results are unchecked - a full disk produces a silently
 * truncated save. TODO confirm this matches the save-state framework's
 * error handling.
 */
void event_save_state( FILE *f )
{
    int32_t id, i;
    id = event_head == NULL ? -1 : event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    id = long_event_head == NULL ? -1 : long_event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
    for( i=0; i<MAX_EVENT_ID; i++ ) {
        fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
        id = events[i].next == NULL ? -1 : events[i].next->id;
        fwrite( &id, sizeof(id), 1, f );
    }
}
272 int event_load_state( FILE *f )
273 {
274 int32_t id, i;
275 fread( &id, sizeof(id), 1, f );
276 event_head = id == -1 ? NULL : &events[id];
277 fread( &id, sizeof(id), 1, f );
278 long_event_head = id == -1 ? NULL : &events[id];
279 fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
280 for( i=0; i<MAX_EVENT_ID; i++ ) {
281 fread( &events[i].id, sizeof(uint32_t), 3, f );
282 fread( &id, sizeof(id), 1, f );
283 events[i].next = id == -1 ? NULL : &events[id];
284 }
285 return 0;
286 }
/**
 * Scan all entries in the long queue, decrementing each by 1 second. Entries
 * that are now < 1 second are moved to the short queue.
 * Called once per LONG_SCAN_PERIOD of emulated time from event_run_slice().
 */
static void event_scan_long()
{
    /* First drain any leading entries that expire - removing the head is a
     * special case, and the decrement happens inside the loop condition */
    while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
        event_t event = long_event_head;
        long_event_head = event->next;
        event_enqueue(event);
    }

    /* Walk the remainder of the list (whose head is now known to survive),
     * unlinking and promoting any entry that expires. Note cur is always
     * re-read from last->next so removal doesn't skip the following node. */
    if( long_event_head != NULL ) {
        event_t last = long_event_head;
        event_t cur = last->next;
        while( cur != NULL ) {
            if( --cur->seconds == 0 ) {
                last->next = cur->next;
                event_enqueue(cur);
            } else {
                last = cur;
            }
            cur = last->next;
        }
    }
}
/**
 * Decrement the event time on all pending events by the supplied nanoseconds.
 * It may or may not be faster to wrap around instead, but this has the benefit
 * of simplicity.
 * @param nanosecs length of the time slice just executed.
 * @return the (unmodified) slice length.
 */
uint32_t event_run_slice( uint32_t nanosecs )
{
    /* Rebase every short-queue event to the start of the next slice,
     * clamping already-due events at 0 */
    event_t event = event_head;
    while( event != NULL ) {
        if( event->nanosecs <= nanosecs ) {
            event->nanosecs = 0;
        } else {
            event->nanosecs -= nanosecs;
        }
        event = event->next;
    }

    /* Age the long queue by one second once per LONG_SCAN_PERIOD */
    long_scan_time_remaining -= nanosecs;
    if( long_scan_time_remaining <= 0 ) {
        long_scan_time_remaining += LONG_SCAN_PERIOD;
        event_scan_long();
    }

    event_update_pending();
    return nanosecs;
}
.