/**
 * eventq.c: Simple implementation of one-shot timers. Effectively this allows IO
 * devices to wait until a particular time before completing. We expect
 * there to be at least half a dozen or so continually scheduled events
 * (TMU and PVR2), peaking around 20+.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
23 #include "dreamcast.h"
28 #define LONG_SCAN_PERIOD 1000000000 /* 1 second */
30 typedef struct event {
39 static struct event events[MAX_EVENT_ID];
42 * Countdown to the next scan of the long-duration list (greater than 1 second).
44 static int long_scan_time_remaining;
46 static event_t event_head;
47 static event_t long_event_head;
51 uint32_t event_run_slice( uint32_t nanosecs );
52 void event_save_state( FILE *f );
53 int event_load_state( FILE * f );
55 struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
56 NULL, event_save_state, event_load_state };
58 static void event_update_pending( )
60 if( event_head == NULL ) {
61 if( !(sh4r.event_types & PENDING_IRQ) ) {
62 sh4r.event_pending = NOT_SCHEDULED;
64 sh4r.event_types &= (~PENDING_EVENT);
66 if( !(sh4r.event_types & PENDING_IRQ) ) {
67 sh4r.event_pending = event_head->nanosecs;
69 sh4r.event_types |= PENDING_EVENT;
73 uint32_t event_get_next_time( )
75 if( event_head == NULL ) {
78 return event_head->nanosecs;
83 * Add the event to the short queue.
85 static void event_enqueue( event_t event )
87 if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
88 event->next = event_head;
90 event_update_pending();
92 event_t cur = event_head;
93 event_t next = cur->next;
94 while( next != NULL && event->nanosecs >= next->nanosecs ) {
103 static void event_dequeue( event_t event )
105 if( event_head == NULL ) {
106 ERROR( "Empty event queue but should contain event %d", event->id );
107 } else if( event_head == event ) {
108 /* removing queue head */
109 event_head = event_head->next;
110 event_update_pending();
112 event_t cur = event_head;
113 event_t next = cur->next;
114 while( next != NULL ) {
115 if( next == event ) {
116 cur->next = next->next;
125 static void event_dequeue_long( event_t event )
127 if( long_event_head == NULL ) {
128 ERROR( "Empty long event queue but should contain event %d", event->id );
129 } else if( long_event_head == event ) {
130 /* removing queue head */
131 long_event_head = long_event_head->next;
133 event_t cur = long_event_head;
134 event_t next = cur->next;
135 while( next != NULL ) {
136 if( next == event ) {
137 cur->next = next->next;
146 void register_event_callback( int eventid, event_func_t func )
148 events[eventid].func = func;
151 void event_schedule( int eventid, uint32_t nanosecs )
153 nanosecs += sh4r.slice_cycle;
155 event_t event = &events[eventid];
157 if( event->nanosecs != NOT_SCHEDULED ) {
158 /* Event is already scheduled. Remove it from the list first */
159 event_cancel(eventid);
164 event->nanosecs = nanosecs;
166 event_enqueue( event );
169 void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
171 event_schedule( eventid, nanosecs );
173 event_t event = &events[eventid];
175 if( event->nanosecs != NOT_SCHEDULED ) {
176 /* Event is already scheduled. Remove it from the list first */
177 event_cancel(eventid);
181 event->seconds = seconds;
182 event->nanosecs = nanosecs;
183 event->next = long_event_head;
184 long_event_head = event;
189 void event_cancel( int eventid )
191 event_t event = &events[eventid];
192 if( event->nanosecs == NOT_SCHEDULED ) {
193 return; /* not scheduled */
195 event->nanosecs = NOT_SCHEDULED;
196 if( event->seconds != 0 ) { /* long term event */
197 event_dequeue_long( event );
199 event_dequeue( event );
207 /* Loop in case we missed some or got a couple scheduled for the same time */
208 while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
209 event_t event = event_head;
210 event_head = event->next;
211 event->nanosecs = NOT_SCHEDULED;
212 // Note: Make sure the internal state is consistent before calling the
213 // user function, as it will (quite likely) enqueue another event.
214 event->func( event->id );
217 event_update_pending();
/**
 * Default event callback: forward the event id to the ASIC as an interrupt
 * event.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
228 for( i=0; i<MAX_EVENT_ID; i++ ) {
230 events[i].nanosecs = NOT_SCHEDULED;
232 events[i].func = event_asic_callback;
234 events[i].func = NULL;
236 events[i].next = NULL;
239 long_event_head = NULL;
240 long_scan_time_remaining = LONG_SCAN_PERIOD;
249 long_event_head = NULL;
250 long_scan_time_remaining = LONG_SCAN_PERIOD;
251 for( i=0; i<MAX_EVENT_ID; i++ ) {
252 events[i].nanosecs = NOT_SCHEDULED;
256 void event_save_state( FILE *f )
259 id = event_head == NULL ? -1 : event_head->id;
260 fwrite( &id, sizeof(id), 1, f );
261 id = long_event_head == NULL ? -1 : long_event_head->id;
262 fwrite( &id, sizeof(id), 1, f );
263 fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
264 for( i=0; i<MAX_EVENT_ID; i++ ) {
265 fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
266 id = events[i].next == NULL ? -1 : events[i].next->id;
267 fwrite( &id, sizeof(id), 1, f );
271 int event_load_state( FILE *f )
274 fread( &id, sizeof(id), 1, f );
275 event_head = id == -1 ? NULL : &events[id];
276 fread( &id, sizeof(id), 1, f );
277 long_event_head = id == -1 ? NULL : &events[id];
278 fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
279 for( i=0; i<MAX_EVENT_ID; i++ ) {
280 fread( &events[i].id, sizeof(uint32_t), 3, f );
281 fread( &id, sizeof(id), 1, f );
282 events[i].next = id == -1 ? NULL : &events[id];
288 * Scan all entries in the long queue, decrementing each by 1 second. Entries
289 * that are now < 1 second are moved to the short queue.
291 static void event_scan_long()
293 while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
294 event_t event = long_event_head;
295 long_event_head = event->next;
296 event_enqueue(event);
299 if( long_event_head != NULL ) {
300 event_t last = long_event_head;
301 event_t cur = last->next;
302 while( cur != NULL ) {
303 if( --cur->seconds == 0 ) {
304 last->next = cur->next;
315 * Decrement the event time on all pending events by the supplied nanoseconds.
316 * It may or may not be faster to wrap around instead, but this has the benefit
319 uint32_t event_run_slice( uint32_t nanosecs )
321 event_t event = event_head;
322 while( event != NULL ) {
323 if( event->nanosecs <= nanosecs ) {
326 event->nanosecs -= nanosecs;
331 long_scan_time_remaining -= nanosecs;
332 if( long_scan_time_remaining <= 0 ) {
333 long_scan_time_remaining += LONG_SCAN_PERIOD;
337 event_update_pending();
.