Search
lxdream.org :: lxdream/src/eventq.c :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/eventq.c
changeset 265:5daf59b7f31b
next422:61a0598e07ff
author nkeynes
date Sat Jan 06 04:06:36 2007 +0000 (13 years ago)
permissions -rw-r--r--
last change Implement event queue.
Fix pvr2 timing (yes, again).
file annotate diff log raw
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/src/eventq.c Sat Jan 06 04:06:36 2007 +0000
1.3 @@ -0,0 +1,341 @@
1.4 +/**
1.5 + * $Id: eventq.c,v 1.1 2007-01-06 04:06:36 nkeynes Exp $
1.6 + *
1.7 + * Simple implementation of one-shot timers. Effectively this allows IO
1.8 + * devices to wait until a particular time before completing. We expect
1.9 + * there to be at least half a dozen or so continually scheduled events
1.10 + * (TMU and PVR2), peaking around 20+.
1.11 + *
1.12 + * Copyright (c) 2005 Nathan Keynes.
1.13 + *
1.14 + * This program is free software; you can redistribute it and/or modify
1.15 + * it under the terms of the GNU General Public License as published by
1.16 + * the Free Software Foundation; either version 2 of the License, or
1.17 + * (at your option) any later version.
1.18 + *
1.19 + * This program is distributed in the hope that it will be useful,
1.20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1.21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.22 + * GNU General Public License for more details.
1.23 + */
1.24 +
1.25 +#include <assert.h>
1.26 +#include "dreamcast.h"
1.27 +#include "eventq.h"
1.28 +#include "sh4core.h"
1.29 +
1.30 +#define LONG_SCAN_PERIOD 1000000000 /* 1 second */
1.31 +
/**
 * A single one-shot timer entry. All events live in the statically allocated
 * events[] table and are linked into either the short queue (event_head,
 * sorted by nanosecs) or the long queue (long_event_head, unsorted).
 * NOTE(review): event_save_state/event_load_state write the first three
 * uint32_t fields (id, seconds, nanosecs) directly — keep them first and
 * contiguous.
 */
typedef struct event {
    uint32_t id;        /* index of this entry in events[] */
    uint32_t seconds;   /* whole seconds remaining; nonzero => on the long queue */
    uint32_t nanosecs;  /* expiry time within the slice, or NOT_SCHEDULED */
    event_func_t func;  /* callback invoked when the event fires */

    struct event *next; /* next entry in whichever queue holds this event */
} *event_t;
1.40 +
/* Static table of all possible events, indexed by event id. */
static struct event events[MAX_EVENT_ID];

/**
 * Countdown to the next scan of the long-duration list (greater than 1 second).
 */
static int long_scan_time_remaining;

/* Head of the short queue: events due within the current second, sorted by nanosecs. */
static event_t event_head;
/* Head of the long queue: events more than a second away, unsorted. */
static event_t long_event_head;

void event_reset();
void event_init();
uint32_t event_run_slice( uint32_t nanosecs );
void event_save_state( FILE *f );
int event_load_state( FILE * f );

/* Module descriptor registered with the dreamcast core scheduler. */
struct dreamcast_module eventq_module = { "EVENTQ", event_init, event_reset, NULL, event_run_slice,
					  NULL, event_save_state, event_load_state };
1.59 +
1.60 +static void event_update_pending( )
1.61 +{
1.62 + if( event_head == NULL ) {
1.63 + if( !(sh4r.event_types & PENDING_IRQ) ) {
1.64 + sh4r.event_pending = NOT_SCHEDULED;
1.65 + }
1.66 + sh4r.event_types &= (~PENDING_EVENT);
1.67 + } else {
1.68 + if( !(sh4r.event_types & PENDING_IRQ) ) {
1.69 + sh4r.event_pending = event_head->nanosecs;
1.70 + }
1.71 + sh4r.event_types |= PENDING_EVENT;
1.72 + }
1.73 +}
1.74 +
1.75 +uint32_t event_get_next_time( )
1.76 +{
1.77 + if( event_head == NULL ) {
1.78 + return NOT_SCHEDULED;
1.79 + } else {
1.80 + return event_head->nanosecs;
1.81 + }
1.82 +}
1.83 +
1.84 +/**
1.85 + * Add the event to the short queue.
1.86 + */
1.87 +static void event_enqueue( event_t event )
1.88 +{
1.89 + if( event_head == NULL || event->nanosecs < event_head->nanosecs ) {
1.90 + event->next = event_head;
1.91 + event_head = event;
1.92 + event_update_pending();
1.93 + } else {
1.94 + event_t cur = event_head;
1.95 + event_t next = cur->next;
1.96 + while( next != NULL && event->nanosecs >= next->nanosecs ) {
1.97 + cur = next;
1.98 + next = cur->next;
1.99 + }
1.100 + event->next = next;
1.101 + cur->next = event;
1.102 + }
1.103 +}
1.104 +
1.105 +static void event_dequeue( event_t event )
1.106 +{
1.107 + if( event_head == NULL ) {
1.108 + ERROR( "Empty event queue but should contain event %d", event->id );
1.109 + } else if( event_head == event ) {
1.110 + /* removing queue head */
1.111 + event_head = event_head->next;
1.112 + event_update_pending();
1.113 + } else {
1.114 + event_t cur = event_head;
1.115 + event_t next = cur->next;
1.116 + while( next != NULL ) {
1.117 + if( next == event ) {
1.118 + cur->next = next->next;
1.119 + break;
1.120 + }
1.121 + cur = next;
1.122 + next = cur->next;
1.123 + }
1.124 + }
1.125 +}
1.126 +
1.127 +static void event_dequeue_long( event_t event )
1.128 +{
1.129 + if( long_event_head == NULL ) {
1.130 + ERROR( "Empty long event queue but should contain event %d", event->id );
1.131 + } else if( long_event_head == event ) {
1.132 + /* removing queue head */
1.133 + long_event_head = long_event_head->next;
1.134 + } else {
1.135 + event_t cur = long_event_head;
1.136 + event_t next = cur->next;
1.137 + while( next != NULL ) {
1.138 + if( next == event ) {
1.139 + cur->next = next->next;
1.140 + break;
1.141 + }
1.142 + cur = next;
1.143 + next = cur->next;
1.144 + }
1.145 + }
1.146 +}
1.147 +
1.148 +void register_event_callback( int eventid, event_func_t func )
1.149 +{
1.150 + events[eventid].func = func;
1.151 +}
1.152 +
1.153 +void event_schedule( int eventid, uint32_t nanosecs )
1.154 +{
1.155 + int i;
1.156 +
1.157 + nanosecs += sh4r.slice_cycle;
1.158 +
1.159 + event_t event = &events[eventid];
1.160 +
1.161 + if( event->nanosecs != NOT_SCHEDULED ) {
1.162 + /* Event is already scheduled. Remove it from the list first */
1.163 + event_cancel(eventid);
1.164 + }
1.165 +
1.166 + event->id = eventid;
1.167 + event->seconds = 0;
1.168 + event->nanosecs = nanosecs;
1.169 +
1.170 + event_enqueue( event );
1.171 +}
1.172 +
1.173 +void event_schedule_long( int eventid, uint32_t seconds, uint32_t nanosecs ) {
1.174 + if( seconds == 0 ) {
1.175 + event_schedule( eventid, nanosecs );
1.176 + } else {
1.177 + event_t event = &events[eventid];
1.178 +
1.179 + if( event->nanosecs != NOT_SCHEDULED ) {
1.180 + /* Event is already scheduled. Remove it from the list first */
1.181 + event_cancel(eventid);
1.182 + }
1.183 +
1.184 + event->id = eventid;
1.185 + event->seconds = seconds;
1.186 + event->nanosecs = nanosecs;
1.187 + event->next = long_event_head;
1.188 + long_event_head = event;
1.189 + }
1.190 +
1.191 +}
1.192 +
1.193 +void event_cancel( int eventid )
1.194 +{
1.195 + event_t event = &events[eventid];
1.196 + if( event->nanosecs == NOT_SCHEDULED ) {
1.197 + return; /* not scheduled */
1.198 + } else {
1.199 + event->nanosecs = NOT_SCHEDULED;
1.200 + if( event->seconds != 0 ) { /* long term event */
1.201 + event_dequeue_long( event );
1.202 + } else {
1.203 + event_dequeue( event );
1.204 + }
1.205 + }
1.206 +}
1.207 +
1.208 +
1.209 +void event_execute()
1.210 +{
1.211 + /* Loop in case we missed some or got a couple scheduled for the same time */
1.212 + while( event_head != NULL && event_head->nanosecs <= sh4r.slice_cycle ) {
1.213 + event_t event = event_head;
1.214 + event_head = event->next;
1.215 + event->nanosecs = NOT_SCHEDULED;
1.216 + // Note: Make sure the internal state is consistent before calling the
1.217 + // user function, as it will (quite likely) enqueue another event.
1.218 + event->func( event->id );
1.219 + }
1.220 +
1.221 + event_update_pending();
1.222 +}
1.223 +
/**
 * Default callback for ASIC event ids: forward the event id to the ASIC
 * event dispatcher.
 */
void event_asic_callback( int eventid )
{
    asic_event( eventid );
}
1.228 +
1.229 +void event_init()
1.230 +{
1.231 + int i;
1.232 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.233 + events[i].id = i;
1.234 + events[i].nanosecs = NOT_SCHEDULED;
1.235 + if( i < 96 ) {
1.236 + events[i].func = event_asic_callback;
1.237 + } else {
1.238 + events[i].func = NULL;
1.239 + }
1.240 + events[i].next = NULL;
1.241 + }
1.242 + event_head = NULL;
1.243 + long_event_head = NULL;
1.244 + long_scan_time_remaining = LONG_SCAN_PERIOD;
1.245 +}
1.246 +
1.247 +
1.248 +
1.249 +void event_reset()
1.250 +{
1.251 + int i;
1.252 + event_head = NULL;
1.253 + long_event_head = NULL;
1.254 + long_scan_time_remaining = LONG_SCAN_PERIOD;
1.255 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.256 + events[i].nanosecs = NOT_SCHEDULED;
1.257 + }
1.258 +}
1.259 +
/**
 * Serialize the event queue state to the save-state file. Queue links are
 * flattened to event ids (-1 for NULL) so they can be rebuilt on load.
 * NOTE(review): depends on id/seconds/nanosecs being the first three
 * contiguous uint32_t fields of struct event; fwrite results are unchecked,
 * so a write failure is silent — consider propagating errors.
 */
void event_save_state( FILE *f )
{
    int id, i;
    id = event_head == NULL ? -1 : event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    id = long_event_head == NULL ? -1 : long_event_head->id;
    fwrite( &id, sizeof(id), 1, f );
    fwrite( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
    for( i=0; i<MAX_EVENT_ID; i++ ) {
	fwrite( &events[i].id, sizeof(uint32_t), 3, f ); /* First 3 words from structure */
	id = events[i].next == NULL ? -1 : events[i].next->id;
	fwrite( &id, sizeof(id), 1, f );
    }
}
1.274 +
1.275 +int event_load_state( FILE *f )
1.276 +{
1.277 + int id, i;
1.278 + fread( &id, sizeof(id), 1, f );
1.279 + event_head = id == -1 ? NULL : &events[id];
1.280 + fread( &id, sizeof(id), 1, f );
1.281 + long_event_head = id == -1 ? NULL : &events[id];
1.282 + fread( &long_scan_time_remaining, sizeof(long_scan_time_remaining), 1, f );
1.283 + for( i=0; i<MAX_EVENT_ID; i++ ) {
1.284 + fread( &events[i].id, sizeof(uint32_t), 3, f );
1.285 + fread( &id, sizeof(id), 1, f );
1.286 + events[i].next = id == -1 ? NULL : &events[id];
1.287 + }
1.288 + return 0;
1.289 +}
1.290 +
/**
 * Scan all entries in the long queue, decrementing each by 1 second. Entries
 * that are now < 1 second are moved to the short queue.
 */
static void event_scan_long()
{
    /* First drain expired entries off the head. Note the decrement happens
     * in the loop condition, so the head is decremented exactly once per
     * iteration before being tested. */
    while( long_event_head != NULL && --long_event_head->seconds == 0 ) {
	event_t event = long_event_head;
	long_event_head = event->next;
	event_enqueue(event);
    }

    /* Then walk the remainder of the list (the head has already been
     * decremented above), unlinking and migrating any entry that expires. */
    if( long_event_head != NULL ) {
	event_t last = long_event_head;
	event_t cur = last->next;
	while( cur != NULL ) {
	    if( --cur->seconds == 0 ) {
		last->next = cur->next;
		event_enqueue(cur);
	    } else {
		last = cur;
	    }
	    cur = last->next;
	}
    }
}
1.317 +
1.318 +/**
1.319 + * Decrement the event time on all pending events by the supplied nanoseconds.
1.320 + * It may or may not be faster to wrap around instead, but this has the benefit
1.321 + * of simplicity.
1.322 + */
1.323 +uint32_t event_run_slice( uint32_t nanosecs )
1.324 +{
1.325 + event_t event = event_head;
1.326 + while( event != NULL ) {
1.327 + if( event->nanosecs <= nanosecs ) {
1.328 + event->nanosecs = 0;
1.329 + } else {
1.330 + event->nanosecs -= nanosecs;
1.331 + }
1.332 + event = event->next;
1.333 + }
1.334 +
1.335 + long_scan_time_remaining -= nanosecs;
1.336 + if( long_scan_time_remaining <= 0 ) {
1.337 + long_scan_time_remaining += LONG_SCAN_PERIOD;
1.338 + event_scan_long();
1.339 + }
1.340 +
1.341 + event_update_pending();
1.342 + return nanosecs;
1.343 +}
1.344 +
.