filename | src/eventq.c |
changeset | 422:61a0598e07ff |
prev | 265:5daf59b7f31b |
next | 561:533f6b478071 |
next | 586:2a3ba82cf243 |
author | nkeynes |
date | Sat Nov 17 01:17:01 2007 +0000 (16 years ago) |
permissions | -rw-r--r-- |
last change | Skip over the "undefined" instructions that actually pass on the SH4 - leaving in until we can determine if they actually do anything, or if they're effectively NOPs |
view | annotate | diff | log | raw |
1 /**
2 * $Id: eventq.c,v 1.2 2007-10-06 08:59:42 nkeynes Exp $
3 *
4 * Simple implementation of one-shot timers. Effectively this allows IO
5 * devices to wait until a particular time before completing. We expect
6 * there to be at least half a dozen or so continually scheduled events
7 * (TMU and PVR2), peaking around 20+.
8 *
9 * Copyright (c) 2005 Nathan Keynes.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
22 #include <assert.h>
23 #include "dreamcast.h"
24 #include "eventq.h"
25 #include "asic.h"
26 #include "sh4core.h"
28 #define LONG_SCAN_PERIOD 1000000000 /* 1 second */
/**
 * A single one-shot timer entry. All entries live in the static events[]
 * table and are linked (via next) into either the short queue (event_head,
 * kept sorted by nanosecs) or the long queue (long_event_head, unsorted).
 */
typedef struct event {
    uint32_t id;       /* Event number; event_init() sets this to the entry's index in events[] */
    uint32_t seconds;  /* Whole seconds remaining; nonzero means the event is on the long queue */
    uint32_t nanosecs; /* Nanoseconds remaining within the current second, or
                        * NOT_SCHEDULED when the event is inactive */
    event_func_t func; /* Callback invoked when the event fires (may be NULL) */

    struct event *next; /* Next entry in whichever queue this event is on */
} *event_t;
/* Static pool of event structures, indexed by event id */
static struct event events[MAX_EVENT_ID];

/**
 * Countdown to the next scan of the long-duration list (greater than 1 second).
 */
static int long_scan_time_remaining;

static event_t event_head;      /* Short queue: due within 1 second, sorted by nanosecs */
static event_t long_event_head; /* Long queue: due in more than 1 second, unsorted */

void event_reset();
void event_init();
uint32_t event_run_slice( uint32_t nanosecs );
void event_save_state( FILE *f );
int event_load_state( FILE * f );

/* Module descriptor registering this queue with the dreamcast module framework */
struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
					  NULL, event_save_state, event_load_state };
58 static void event_update_pending( )
59 {
60 if( event_head == NULL ) {
61 if( !(sh4r.event_types & PENDING_IRQ) ) {
62 sh4r.event_pending = NOT_SCHEDULED;
63 }
64 sh4r.event_types &= (~PENDING_EVENT);
65 } else {
66 if( !(sh4r.event_types & PENDING_IRQ) ) {
67 sh4r.event_pending = event_head->nanosecs;
68 }
69 sh4r.event_types |= PENDING_EVENT;
70 }
71 }
73 uint32_t event_get_next_time( )
74 {
75 if( event_head == NULL ) {
76 return NOT_SCHEDULED;
77 } else {
78 return event_head->nanosecs;
79 }
80 }
82 /**
83 * Add the event to the short queue.
84 */
85 static void event_enqueue( event_t event )
86 {
87 if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
88 event->next = event_head;
89 event_head = event;
90 event_update_pending();
91 } else {
92 event_t cur = event_head;
93 event_t next = cur->next;
94 while( next != NULL && event->nanosecs >= next->nanosecs ) {
95 cur = next;
96 next = cur->next;
97 }
98 event->next = next;
99 cur->next = event;
100 }
101 }
103 static void event_dequeue( event_t event )
104 {
105 if( event_head == NULL ) {
106 ERROR( "Empty event queue but should contain event %d", event->id );
107 } else if( event_head == event ) {
108 /* removing queue head */
109 event_head = event_head->next;
110 event_update_pending();
111 } else {
112 event_t cur = event_head;
113 event_t next = cur->next;
114 while( next != NULL ) {
115 if( next == event ) {
116 cur->next = next->next;
117 break;
118 }
119 cur = next;
120 next = cur->next;
121 }
122 }
123 }
125 static void event_dequeue_long( event_t event )
126 {
127 if( long_event_head == NULL ) {
128 ERROR( "Empty long event queue but should contain event %d", event->id );
129 } else if( long_event_head == event ) {
130 /* removing queue head */
131 long_event_head = long_event_head->next;
132 } else {
133 event_t cur = long_event_head;
134 event_t next = cur->next;
135 while( next != NULL ) {
136 if( next == event ) {
137 cur->next = next->next;
138 break;
139 }
140 cur = next;
141 next = cur->next;
142 }
143 }
144 }
146 void register_event_callback( int eventid, event_func_t func )
147 {
148 events[eventid].func = func;
149 }
151 void event_schedule( int eventid, uint32_t nanosecs )
152 {
153 nanosecs += sh4r.slice_cycle;
155 event_t event = &events[eventid];
157 if( event->nanosecs != NOT_SCHEDULED ) {
158 /* Event is already scheduled. Remove it from the list first */
159 event_cancel(eventid);
160 }
162 event->id = eventid;
163 event->seconds = 0;
164 event->nanosecs = nanosecs;
166 event_enqueue( event );
167 }
169 void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
170 if( seconds == 0 ) {
171 event_schedule( eventid, nanosecs );
172 } else {
173 event_t event = &events[eventid];
175 if( event->nanosecs != NOT_SCHEDULED ) {
176 /* Event is already scheduled. Remove it from the list first */
177 event_cancel(eventid);
178 }
180 event->id = eventid;
181 event->seconds = seconds;
182 event->nanosecs = nanosecs;
183 event->next = long_event_head;
184 long_event_head = event;
185 }
187 }
189 void event_cancel( int eventid )
190 {
191 event_t event = &events[eventid];
192 if( event->nanosecs == NOT_SCHEDULED ) {
193 return; /* not scheduled */
194 } else {
195 event->nanosecs = NOT_SCHEDULED;
196 if( event->seconds != 0 ) { /* long term event */
197 event_dequeue_long( event );
198 } else {
199 event_dequeue( event );
200 }
201 }
202 }
/**
 * Fire every short-queue event whose deadline (nanosecs) has been reached
 * by the current slice cycle count, then refresh the SH4 pending state.
 * NOTE(review): event->func is called without a NULL check - assumes a
 * callback has been registered for every scheduled event; confirm callers.
 */
void event_execute()
{
    /* Loop in case we missed some or got a couple scheduled for the same time */
    while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
        event_t event = event_head;
        event_head = event->next;
        event->nanosecs = NOT_SCHEDULED;
        // Note: Make sure the internal state is consistent before calling the
        // user function, as it will (quite likely) enqueue another event.
        event->func( event->id );
    }

    event_update_pending();
}
/**
 * Default callback installed for the low-numbered events: forwards the
 * event id straight to asic_event().
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
225 void event_init()
226 {
227 int i;
228 for( i=0; i<MAX_EVENT_ID; i++ ) {
229 events[i].id = i;
230 events[i].nanosecs = NOT_SCHEDULED;
231 if( i < 96 ) {
232 events[i].func = event_asic_callback;
233 } else {
234 events[i].func = NULL;
235 }
236 events[i].next = NULL;
237 }
238 event_head = NULL;
239 long_event_head = NULL;
240 long_scan_time_remaining = LONG_SCAN_PERIOD;
241 }
245 void event_reset()
246 {
247 int i;
248 event_head = NULL;
249 long_event_head = NULL;
250 long_scan_time_remaining = LONG_SCAN_PERIOD;
251 for( i=0; i<MAX_EVENT_ID; i++ ) {
252 events[i].nanosecs = NOT_SCHEDULED;
253 }
254 }
/**
 * Serialize the event queue state to f. Queue links are written as event
 * ids (-1 for NULL) rather than pointers, so the saved state is
 * relocatable; event_load_state() converts them back.
 * NOTE(review): fwrite return values are ignored - a short write is not
 * reported to the caller (the module interface gives this function no
 * return value).
 */
void event_save_state( FILE *f )
{
    int id, i;
    id = event_head == NULL ? -1 : event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    id = long_event_head == NULL ? -1 : long_event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
    for( i=0; i<MAX_EVENT_ID; i++ ) {
        /* First 3 words from structure: id, seconds, nanosecs (relies on the
         * three uint32_t fields being laid out contiguously in struct event) */
        fwrite( &events[i].id, sizeof(uint32_t), 3, f );
        id = events[i].next == NULL ? -1 : events[i].next->id;
        fwrite( &id, sizeof(id), 1, f );
    }
}
271 int event_load_state( FILE *f )
272 {
273 int id, i;
274 fread( &id, sizeof(id), 1, f );
275 event_head = id == -1 ? NULL : &events[id];
276 fread( &id, sizeof(id), 1, f );
277 long_event_head = id == -1 ? NULL : &events[id];
278 fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
279 for( i=0; i<MAX_EVENT_ID; i++ ) {
280 fread( &events[i].id, sizeof(uint32_t), 3, f );
281 fread( &id, sizeof(id), 1, f );
282 events[i].next = id == -1 ? NULL : &events[id];
283 }
284 return 0;
285 }
/**
 * Scan all entries in the long queue, decrementing each by 1 second. Entries
 * that are now < 1 second are moved to the short queue.
 */
static void event_scan_long()
{
    /* First pop head entries as they come due, so the remainder of the scan
     * can assume a stable head */
    while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
        event_t event = long_event_head;
        long_event_head = event->next;
        event_enqueue(event);
    }

    if( long_event_head != NULL ) {
        event_t last = long_event_head;
        event_t cur = last->next;
        while( cur != NULL ) {
            if( --cur->seconds == 0 ) {
                /* Unlink cur and move it to the short queue. event_enqueue()
                 * rewrites cur->next, so the successor must be re-read from
                 * last->next below rather than from cur. */
                last->next = cur->next;
                event_enqueue(cur);
            } else {
                last = cur;
            }
            cur = last->next;
        }
    }
}
314 /**
315 * Decrement the event time on all pending events by the supplied nanoseconds.
316 * It may or may not be faster to wrap around instead, but this has the benefit
317 * of simplicity.
318 */
319 uint32_t event_run_slice( uint32_t nanosecs )
320 {
321 event_t event = event_head;
322 while( event != NULL ) {
323 if( event->nanosecs <= nanosecs ) {
324 event->nanosecs = 0;
325 } else {
326 event->nanosecs -= nanosecs;
327 }
328 event = event->next;
329 }
331 long_scan_time_remaining -= nanosecs;
332 if( long_scan_time_remaining <= 0 ) {
333 long_scan_time_remaining += LONG_SCAN_PERIOD;
334 event_scan_long();
335 }
337 event_update_pending();
338 return nanosecs;
339 }
.