/**
 * Simple implementation of one-shot timers. Effectively this allows IO
 * devices to wait until a particular time before completing. We expect
 * there to be at least half a dozen or so continually scheduled events
 * (TMU and PVR2), peaking around 20+.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
24 #include "dreamcast.h"
29 #define LONG_SCAN_PERIOD 1000000000 /* 1 second */
31 typedef struct event {
40 static struct event events[MAX_EVENT_ID];
43 * Countdown to the next scan of the long-duration list (greater than 1 second).
45 static int long_scan_time_remaining;
47 static event_t event_head;
48 static event_t long_event_head;
52 uint32_t event_run_slice( uint32_t nanosecs );
53 void event_save_state( FILE *f );
54 int event_load_state( FILE * f );
56 struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
57 NULL, event_save_state, event_load_state };
59 static void event_update_pending( )
61 if( event_head == NULL ) {
62 if( !(sh4r.event_types & PENDING_IRQ) ) {
63 sh4r.event_pending = NOT_SCHEDULED;
65 sh4r.event_types &= (~PENDING_EVENT);
67 if( !(sh4r.event_types & PENDING_IRQ) ) {
68 sh4r.event_pending = event_head->nanosecs;
70 sh4r.event_types |= PENDING_EVENT;
74 uint32_t event_get_next_time( )
76 if( event_head == NULL ) {
79 return event_head->nanosecs;
84 * Add the event to the short queue.
86 static void event_enqueue( event_t event )
88 if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
89 event->next = event_head;
91 event_update_pending();
93 event_t cur = event_head;
94 event_t next = cur->next;
95 while( next != NULL && event->nanosecs >= next->nanosecs ) {
104 static void event_dequeue( event_t event )
106 if( event_head == NULL ) {
107 ERROR( "Empty event queue but should contain event %d", event->id );
108 } else if( event_head == event ) {
109 /* removing queue head */
110 event_head = event_head->next;
111 event_update_pending();
113 event_t cur = event_head;
114 event_t next = cur->next;
115 while( next != NULL ) {
116 if( next == event ) {
117 cur->next = next->next;
126 static void event_dequeue_long( event_t event )
128 if( long_event_head == NULL ) {
129 ERROR( "Empty long event queue but should contain event %d", event->id );
130 } else if( long_event_head == event ) {
131 /* removing queue head */
132 long_event_head = long_event_head->next;
134 event_t cur = long_event_head;
135 event_t next = cur->next;
136 while( next != NULL ) {
137 if( next == event ) {
138 cur->next = next->next;
147 void register_event_callback( int eventid, event_func_t func )
149 events[eventid].func = func;
152 void event_schedule( int eventid, uint32_t nanosecs )
154 nanosecs += sh4r.slice_cycle;
156 event_t event = &events[eventid];
158 if( event->nanosecs != NOT_SCHEDULED ) {
159 /* Event is already scheduled. Remove it from the list first */
160 event_cancel(eventid);
165 event->nanosecs = nanosecs;
167 event_enqueue( event );
170 void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
172 event_schedule( eventid, nanosecs );
174 event_t event = &events[eventid];
176 if( event->nanosecs != NOT_SCHEDULED ) {
177 /* Event is already scheduled. Remove it from the list first */
178 event_cancel(eventid);
182 event->seconds = seconds;
183 event->nanosecs = nanosecs;
184 event->next = long_event_head;
185 long_event_head = event;
190 void event_cancel( int eventid )
192 event_t event = &events[eventid];
193 if( event->nanosecs == NOT_SCHEDULED ) {
194 return; /* not scheduled */
196 event->nanosecs = NOT_SCHEDULED;
197 if( event->seconds != 0 ) { /* long term event */
198 event_dequeue_long( event );
200 event_dequeue( event );
208 /* Loop in case we missed some or got a couple scheduled for the same time */
209 while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
210 event_t event = event_head;
211 event_head = event->next;
212 event->nanosecs = NOT_SCHEDULED;
213 // Note: Make sure the internal state is consistent before calling the
214 // user function, as it will (quite likely) enqueue another event.
215 event->func( event->id );
218 event_update_pending();
/**
 * Default callback for ASIC-sourced events: forward the id to the ASIC module.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
229 for( i=0; i<MAX_EVENT_ID; i++ ) {
231 events[i].nanosecs = NOT_SCHEDULED;
233 events[i].func = event_asic_callback;
235 events[i].func = NULL;
237 events[i].next = NULL;
240 long_event_head = NULL;
241 long_scan_time_remaining = LONG_SCAN_PERIOD;
250 long_event_head = NULL;
251 long_scan_time_remaining = LONG_SCAN_PERIOD;
252 for( i=0; i<MAX_EVENT_ID; i++ ) {
253 events[i].nanosecs = NOT_SCHEDULED;
257 void event_save_state( FILE *f )
260 id = event_head == NULL ? -1 : event_head->id;
261 fwrite( &id, sizeof(id), 1, f );
262 id = long_event_head == NULL ? -1 : long_event_head->id;
263 fwrite( &id, sizeof(id), 1, f );
264 fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
265 for( i=0; i<MAX_EVENT_ID; i++ ) {
266 fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
267 id = events[i].next == NULL ? -1 : events[i].next->id;
268 fwrite( &id, sizeof(id), 1, f );
272 int event_load_state( FILE *f )
275 fread( &id, sizeof(id), 1, f );
276 event_head = id == -1 ? NULL : &events[id];
277 fread( &id, sizeof(id), 1, f );
278 long_event_head = id == -1 ? NULL : &events[id];
279 fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
280 for( i=0; i<MAX_EVENT_ID; i++ ) {
281 fread( &events[i].id, sizeof(uint32_t), 3, f );
282 fread( &id, sizeof(id), 1, f );
283 events[i].next = id == -1 ? NULL : &events[id];
289 * Scan all entries in the long queue, decrementing each by 1 second. Entries
290 * that are now < 1 second are moved to the short queue.
292 static void event_scan_long()
294 while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
295 event_t event = long_event_head;
296 long_event_head = event->next;
297 event_enqueue(event);
300 if( long_event_head != NULL ) {
301 event_t last = long_event_head;
302 event_t cur = last->next;
303 while( cur != NULL ) {
304 if( --cur->seconds == 0 ) {
305 last->next = cur->next;
316 * Decrement the event time on all pending events by the supplied nanoseconds.
317 * It may or may not be faster to wrap around instead, but this has the benefit
320 uint32_t event_run_slice( uint32_t nanosecs )
322 event_t event = event_head;
323 while( event != NULL ) {
324 if( event->nanosecs <= nanosecs ) {
327 event->nanosecs -= nanosecs;
332 long_scan_time_remaining -= nanosecs;
333 if( long_scan_time_remaining <= 0 ) {
334 long_scan_time_remaining += LONG_SCAN_PERIOD;
338 event_update_pending();
.