1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/src/eventq.c Tue Jan 16 10:34:46 2007 +0000
1.5 + * $Id: eventq.c,v 1.1 2007-01-06 04:06:36 nkeynes Exp $
1.7 + * Simple implementation of one-shot timers. Effectively this allows IO
1.8 + * devices to wait until a particular time before completing. We expect
1.9 + * there to be at least half a dozen or so continually scheduled events
1.10 + * (TMU and PVR2), peaking around 20+.
1.12 + * Copyright (c) 2005 Nathan Keynes.
1.14 + * This program is free software; you can redistribute it and/or modify
1.15 + * it under the terms of the GNU General Public License as published by
1.16 + * the Free Software Foundation; either version 2 of the License, or
1.17 + * (at your option) any later version.
1.19 + * This program is distributed in the hope that it will be useful,
1.20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1.21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.22 + * GNU General Public License for more details.
1.25 +#include <assert.h>
1.26 +#include "dreamcast.h"
1.27 +#include "eventq.h"
1.28 +#include "sh4core.h"
1.30 +#define LONG_SCAN_PERIOD 1000000000 /* 1 second */
1.32 +typedef struct event {
1.35 + uint32_t nanosecs;
1.36 + event_func_t func;
1.38 + struct event *next;
1.41 +static struct event events[MAX_EVENT_ID];
1.44 + * Countdown to the next scan of the long-duration list (greater than 1 second).
1.46 +static int long_scan_time_remaining;
1.48 +static event_t event_head;
1.49 +static event_t long_event_head;
1.51 +void event_reset();
1.53 +uint32_t event_run_slice( uint32_t nanosecs );
1.54 +void event_save_state( FILE *f );
1.55 +int event_load_state( FILE * f );
1.57 +struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
1.58 + NULL, event_save_state, event_load_state };
1.60 +static void event_update_pending( )
1.62 + if( event_head == NULL ) {
1.63 + if( !(sh4r.event_types & PENDING_IRQ) ) {
1.64 + sh4r.event_pending = NOT_SCHEDULED;
1.66 + sh4r.event_types &= (~PENDING_EVENT);
1.68 + if( !(sh4r.event_types & PENDING_IRQ) ) {
1.69 + sh4r.event_pending = event_head->nanosecs;
1.71 + sh4r.event_types |= PENDING_EVENT;
1.75 +uint32_t event_get_next_time( )
1.77 + if( event_head == NULL ) {
1.78 + return NOT_SCHEDULED;
1.80 + return event_head->nanosecs;
1.85 + * Add the event to the short queue.
1.87 +static void event_enqueue( event_t event )
1.89 + if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
1.90 + event->next = event_head;
1.91 + event_head = event;
1.92 + event_update_pending();
1.94 + event_t cur = event_head;
1.95 + event_t next = cur->next;
1.96 + while( next != NULL && event->nanosecs >= next->nanosecs ) {
1.100 + event->next = next;
1.101 + cur->next = event;
1.105 +static void event_dequeue( event_t event )
1.107 + if( event_head == NULL ) {
1.108 + ERROR( "Empty event queue but should contain event %d", event->id );
1.109 + } else if( event_head == event ) {
1.110 + /* removing queue head */
1.111 + event_head = event_head->next;
1.112 + event_update_pending();
1.114 + event_t cur = event_head;
1.115 + event_t next = cur->next;
1.116 + while( next != NULL ) {
1.117 + if( next == event ) {
1.118 + cur->next = next->next;
1.122 + next = cur->next;
1.127 +static void event_dequeue_long( event_t event )
1.129 + if( long_event_head == NULL ) {
1.130 + ERROR( "Empty long event queue but should contain event %d", event->id );
1.131 + } else if( long_event_head == event ) {
1.132 + /* removing queue head */
1.133 + long_event_head = long_event_head->next;
1.135 + event_t cur = long_event_head;
1.136 + event_t next = cur->next;
1.137 + while( next != NULL ) {
1.138 + if( next == event ) {
1.139 + cur->next = next->next;
1.143 + next = cur->next;
1.148 +void register_event_callback( int eventid, event_func_t func )
1.150 + events[eventid].func = func;
1.153 +void event_schedule( int eventid, uint32_t nanosecs )
1.157 + nanosecs += sh4r.slice_cycle;
1.159 + event_t event = &events[eventid];
1.161 + if( event->nanosecs != NOT_SCHEDULED ) {
1.162 + /* Event is already scheduled. Remove it from the list first */
1.163 + event_cancel(eventid);
1.166 + event->id = eventid;
1.167 + event->seconds = 0;
1.168 + event->nanosecs = nanosecs;
1.170 + event_enqueue( event );
1.173 +void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
1.174 + if( seconds == 0 ) {
1.175 + event_schedule( eventid, nanosecs );
1.177 + event_t event = &events[eventid];
1.179 + if( event->nanosecs != NOT_SCHEDULED ) {
1.180 + /* Event is already scheduled. Remove it from the list first */
1.181 + event_cancel(eventid);
1.184 + event->id = eventid;
1.185 + event->seconds = seconds;
1.186 + event->nanosecs = nanosecs;
1.187 + event->next = long_event_head;
1.188 + long_event_head = event;
1.193 +void event_cancel( int eventid )
1.195 + event_t event = &events[eventid];
1.196 + if( event->nanosecs == NOT_SCHEDULED ) {
1.197 + return; /* not scheduled */
1.199 + event->nanosecs = NOT_SCHEDULED;
1.200 + if( event->seconds != 0 ) { /* long term event */
1.201 + event_dequeue_long( event );
1.203 + event_dequeue( event );
1.209 +void event_execute()
1.211 + /* Loop in case we missed some or got a couple scheduled for the same time */
1.212 + while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
1.213 + event_t event = event_head;
1.214 + event_head = event->next;
1.215 + event->nanosecs = NOT_SCHEDULED;
1.216 + // Note: Make sure the internal state is consistent before calling the
1.217 + // user function, as it will (quite likely) enqueue another event.
1.218 + event->func( event->id );
1.221 + event_update_pending();
/**
 * Default event callback: forward the event to the ASIC interrupt logic.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
1.232 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.233 + events[i].id = i;
1.234 + events[i].nanosecs = NOT_SCHEDULED;
1.236 + events[i].func = event_asic_callback;
1.238 + events[i].func = NULL;
1.240 + events[i].next = NULL;
1.242 + event_head = NULL;
1.243 + long_event_head = NULL;
1.244 + long_scan_time_remaining = LONG_SCAN_PERIOD;
1.249 +void event_reset()
1.252 + event_head = NULL;
1.253 + long_event_head = NULL;
1.254 + long_scan_time_remaining = LONG_SCAN_PERIOD;
1.255 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.256 + events[i].nanosecs = NOT_SCHEDULED;
1.260 +void event_save_state( FILE *f )
1.263 + id = event_head == NULL ? -1 : event_head->id;
1.264 + fwrite( &id, sizeof(id), 1, f );
1.265 + id = long_event_head == NULL ? -1 : long_event_head->id;
1.266 + fwrite( &id, sizeof(id), 1, f );
1.267 + fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
1.268 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.269 + fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
1.270 + id = events[i].next == NULL ? -1 : events[i].next->id;
1.271 + fwrite( &id, sizeof(id), 1, f );
1.275 +int event_load_state( FILE *f )
1.278 + fread( &id, sizeof(id), 1, f );
1.279 + event_head = id == -1 ? NULL : &events[id];
1.280 + fread( &id, sizeof(id), 1, f );
1.281 + long_event_head = id == -1 ? NULL : &events[id];
1.282 + fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
1.283 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.284 + fread( &events[i].id, sizeof(uint32_t), 3, f );
1.285 + fread( &id, sizeof(id), 1, f );
1.286 + events[i].next = id == -1 ? NULL : &events[id];
1.292 + * Scan all entries in the long queue, decrementing each by 1 second. Entries
1.293 + * that are now < 1 second are moved to the short queue.
1.295 +static void event_scan_long()
1.297 + while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
1.298 + event_t event = long_event_head;
1.299 + long_event_head = event->next;
1.300 + event_enqueue(event);
1.303 + if( long_event_head != NULL ) {
1.304 + event_t last = long_event_head;
1.305 + event_t cur = last->next;
1.306 + while( cur != NULL ) {
1.307 + if( --cur->seconds == 0 ) {
1.308 + last->next = cur->next;
1.309 + event_enqueue(cur);
1.313 + cur = last->next;
1.319 + * Decrement the event time on all pending events by the supplied nanoseconds.
1.320 + * It may or may not be faster to wrap around instead, but this has the benefit
1.323 +uint32_t event_run_slice( uint32_t nanosecs )
1.325 + event_t event = event_head;
1.326 + while( event != NULL ) {
1.327 + if( event->nanosecs <= nanosecs ) {
1.328 + event->nanosecs = 0;
1.330 + event->nanosecs -= nanosecs;
1.332 + event = event->next;
1.335 + long_scan_time_remaining -= nanosecs;
1.336 + if( long_scan_time_remaining <= 0 ) {
1.337 + long_scan_time_remaining += LONG_SCAN_PERIOD;
1.338 + event_scan_long();
1.341 + event_update_pending();