lxdream.org :: lxdream :: r619:0800a0137472
changeset 619:0800a0137472
parent    618:3ade50e8603c
child     620:d9b28f78b952
author    nkeynes
date      Wed Jan 30 09:38:24 2008 +0000
Deliver TMU interrupts precisely rather than only at end of time slice
src/eventq.h
src/sh4/sh4.c
src/sh4/sh4trans.c
src/sh4/timer.c
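
The change replaces end-of-slice timer catch-up with events scheduled for the exact moment a TMU channel underflows: TMU_schedule_timer (added in timer.c below) converts the counts remaining on the channel into a nanosecond deadline and hands it to event_schedule_long. The standalone sketch below illustrates that deadline arithmetic; the struct fields mirror the patch (timer_period, timer_remainder), but schedule_event, the sample values, and the surrounding program are stand-ins for illustration only, not lxdream code.

/* Standalone sketch: turn a down-counting timer's underflow into a precisely
 * scheduled event instead of detecting it only when the time slice ends. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct timer_state {
    uint32_t tcnt;            /* current down-counter value */
    uint32_t timer_period;    /* nanoseconds per count tick */
    uint32_t timer_remainder; /* ns already consumed of the current tick */
};

/* ns until underflow: (tcnt+1) full ticks minus the partially elapsed tick */
static uint64_t ns_until_underflow( const struct timer_state *t )
{
    return ((uint64_t)t->tcnt + 1) * t->timer_period - t->timer_remainder;
}

/* Stand-in for event_schedule_long( EVENT_TMUn, secs, ns ) */
static void schedule_event( int event, uint32_t secs, uint32_t ns )
{
    printf( "event %d due in %" PRIu32 "s + %" PRIu32 "ns\n", event, secs, ns );
}

int main(void)
{
    struct timer_state tmu0 = { .tcnt = 50000, .timer_period = 80, .timer_remainder = 20 };
    uint64_t duration = ns_until_underflow( &tmu0 );
    schedule_event( 97 /* EVENT_TMU0 */, (uint32_t)(duration / 1000000000),
                    (uint32_t)(duration % 1000000000) );
    return 0;
}
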
1.1 --- a/src/eventq.h Wed Jan 30 02:39:57 2008 +0000
1.2 +++ b/src/eventq.h Wed Jan 30 09:38:24 2008 +0000
1.3 @@ -68,4 +68,6 @@
1.4
1.5 /* Events 1..96 are defined as the corresponding ASIC events. */
1.6
1.7 -
1.8 +#define EVENT_TMU0 97
1.9 +#define EVENT_TMU1 98
1.10 +#define EVENT_TMU2 99
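
The three new event IDs plug into the existing event-queue callback mechanism: TMU_init (added in timer.c below) registers TMU_event_callback for each of them, and the queue invokes the callback when the scheduled deadline arrives. The sketch below shows that dispatch pattern in isolation; the handler table, register_handler and dispatch_event are illustrative stand-ins, not the lxdream eventq implementation.

/* Standalone sketch of event-id -> callback dispatch. */
#include <stdio.h>

#define EVENT_TMU0 97
#define EVENT_TMU1 98
#define EVENT_TMU2 99
#define MAX_EVENT  128

typedef void (*event_callback_t)( int eventid );
static event_callback_t event_handlers[MAX_EVENT];

static void register_handler( int eventid, event_callback_t cb )
{
    event_handlers[eventid] = cb;
}

/* Called by the (hypothetical) queue when an event's deadline is reached */
static void dispatch_event( int eventid )
{
    if( event_handlers[eventid] != NULL )
        event_handlers[eventid]( eventid );
}

static void tmu_underflow( int eventid )
{
    printf( "TMU channel %d underflowed\n", eventid - EVENT_TMU0 );
}

int main(void)
{
    register_handler( EVENT_TMU0, tmu_underflow );
    register_handler( EVENT_TMU1, tmu_underflow );
    register_handler( EVENT_TMU2, tmu_underflow );
    dispatch_event( EVENT_TMU1 );   /* prints "TMU channel 1 underflowed" */
    return 0;
}
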
2.1 --- a/src/sh4/sh4.c Wed Jan 30 02:39:57 2008 +0000
2.2 +++ b/src/sh4/sh4.c Wed Jan 30 09:38:24 2008 +0000
2.3 @@ -80,6 +80,7 @@
2.4 register_io_regions( mmio_list_sh4mmio );
2.5 sh4_main_ram = mem_get_region_by_name(MEM_REGION_MAIN);
2.6 MMU_init();
2.7 + TMU_init();
2.8 sh4_reset();
2.9 }
2.10
2.11 @@ -399,10 +400,6 @@
2.12 }
2.13 }
2.14 sh4r.slice_cycle = nanosecs;
2.15 - if( sleep_state != SH4_STATE_STANDBY ) {
2.16 - TMU_run_slice( nanosecs );
2.17 - SCIF_run_slice( nanosecs );
2.18 - }
2.19 return sh4r.slice_cycle;
2.20 }
2.21
3.1 --- a/src/sh4/sh4trans.c Wed Jan 30 02:39:57 2008 +0000
3.2 +++ b/src/sh4/sh4trans.c Wed Jan 30 09:38:24 2008 +0000
3.3 @@ -96,7 +96,7 @@
3.4
3.5 xlat_running = FALSE;
3.6 sh4_starting = FALSE;
3.7 -
3.8 + sh4r.slice_cycle = nanosecs;
3.9 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
3.10 TMU_run_slice( nanosecs );
3.11 SCIF_run_slice( nanosecs );
4.1 --- a/src/sh4/timer.c Wed Jan 30 02:39:57 2008 +0000
4.2 +++ b/src/sh4/timer.c Wed Jan 30 09:38:24 2008 +0000
4.3 @@ -17,12 +17,14 @@
4.4 * GNU General Public License for more details.
4.5 */
4.6
4.7 -#include "dream.h"
4.8 +#include <assert.h>
4.9 +#include "lxdream.h"
4.10 #include "mem.h"
4.11 #include "clock.h"
4.12 -#include "sh4core.h"
4.13 -#include "sh4mmio.h"
4.14 -#include "intc.h"
4.15 +#include "eventq.h"
4.16 +#include "sh4/sh4core.h"
4.17 +#include "sh4/sh4mmio.h"
4.18 +#include "sh4/intc.h"
4.19
4.20 /********************************* CPG *************************************/
4.21 /* This is the base clock from which all other clocks are derived */
4.22 @@ -100,8 +102,21 @@
4.23
4.24 /********************************** TMU *************************************/
4.25
4.26 +#define TMU_IS_RUNNING(timer) (MMIO_READ(TMU,TSTR) & (1<<timer))
4.27 +
4.28 uint32_t TMU_count( int timer, uint32_t nanosecs );
4.29
4.30 +void TMU_event_callback( int eventid )
4.31 +{
4.32 + TMU_count( eventid - EVENT_TMU0, sh4r.slice_cycle );
4.33 +}
4.34 +
4.35 +void TMU_init(void)
4.36 +{
4.37 + register_event_callback( EVENT_TMU0, TMU_event_callback );
4.38 + register_event_callback( EVENT_TMU1, TMU_event_callback );
4.39 + register_event_callback( EVENT_TMU2, TMU_event_callback );
4.40 +}
4.41
4.42 #define TCR_ICPF 0x0200
4.43 #define TCR_UNF 0x0100
4.44 @@ -115,22 +130,19 @@
4.45 uint32_t timer_run; /* cycles already run from this slice */
4.46 };
4.47
4.48 -struct TMU_timer TMU_timers[3];
4.49 +static struct TMU_timer TMU_timers[3];
4.50
4.51 int32_t mmio_region_TMU_read( uint32_t reg )
4.52 {
4.53 switch( reg ) {
4.54 case TCNT0:
4.55 TMU_count( 0, sh4r.slice_cycle );
4.56 - TMU_timers[0].timer_run = sh4r.slice_cycle;
4.57 break;
4.58 case TCNT1:
4.59 TMU_count( 1, sh4r.slice_cycle );
4.60 - TMU_timers[1].timer_run = sh4r.slice_cycle;
4.61 break;
4.62 case TCNT2:
4.63 TMU_count( 2, sh4r.slice_cycle );
4.64 - TMU_timers[2].timer_run = sh4r.slice_cycle;
4.65 break;
4.66 }
4.67 return MMIO_READ( TMU, reg );
4.68 @@ -187,10 +199,19 @@
4.69 MMIO_WRITE( TMU, TCR0 + (12*timer), tcr );
4.70 }
4.71
4.72 +void TMU_schedule_timer( int timer )
4.73 +{
4.74 + uint64_t duration = (uint64_t)((uint32_t)(MMIO_READ( TMU, TCNT0 + 12*timer )+1)) *
4.75 + (uint64_t)TMU_timers[timer].timer_period - TMU_timers[timer].timer_remainder;
4.76 + event_schedule_long( EVENT_TMU0+timer, (uint32_t)(duration / 1000000000),
4.77 + (uint32_t)(duration % 1000000000) );
4.78 +}
4.79 +
4.80 void TMU_start( int timer )
4.81 {
4.82 TMU_timers[timer].timer_run = sh4r.slice_cycle;
4.83 TMU_timers[timer].timer_remainder = 0;
4.84 + TMU_schedule_timer( timer );
4.85 }
4.86
4.87 /**
4.88 @@ -199,7 +220,7 @@
4.89 void TMU_stop( int timer )
4.90 {
4.91 TMU_count( timer, sh4r.slice_cycle );
4.92 - TMU_timers[timer].timer_run = sh4r.slice_cycle;
4.93 + event_cancel( EVENT_TMU0+timer );
4.94 }
4.95
4.96 /**
4.97 @@ -207,25 +228,28 @@
4.98 */
4.99 uint32_t TMU_count( int timer, uint32_t nanosecs )
4.100 {
4.101 - nanosecs = nanosecs + TMU_timers[timer].timer_remainder -
4.102 + uint32_t run_ns = nanosecs + TMU_timers[timer].timer_remainder -
4.103 TMU_timers[timer].timer_run;
4.104 TMU_timers[timer].timer_remainder =
4.105 - nanosecs % TMU_timers[timer].timer_period;
4.106 - uint32_t count = nanosecs / TMU_timers[timer].timer_period;
4.107 + run_ns % TMU_timers[timer].timer_period;
4.108 + TMU_timers[timer].timer_run = nanosecs;
4.109 + uint32_t count = run_ns / TMU_timers[timer].timer_period;
4.110 uint32_t value = MMIO_READ( TMU, TCNT0 + 12*timer );
4.111 uint32_t reset = MMIO_READ( TMU, TCOR0 + 12*timer );
4.112 if( count > value ) {
4.113 uint32_t tcr = MMIO_READ( TMU, TCR0 + 12*timer );
4.114 tcr |= TCR_UNF;
4.115 count -= value;
4.116 - value = reset - (count % reset);
4.117 + value = reset - (count % reset) + 1;
4.118 MMIO_WRITE( TMU, TCR0 + 12*timer, tcr );
4.119 if( tcr & TCR_UNIE )
4.120 intc_raise_interrupt( INT_TMU_TUNI0 + timer );
4.121 + MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
4.122 + TMU_schedule_timer(timer);
4.123 } else {
4.124 value -= count;
4.125 + MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
4.126 }
4.127 - MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
4.128 return value;
4.129 }
4.130
4.131 @@ -253,27 +277,53 @@
4.132 case TCR2:
4.133 TMU_set_timer_control( 2, val );
4.134 return;
4.135 + case TCNT0:
4.136 + MMIO_WRITE( TMU, reg, val );
4.137 + if( TMU_IS_RUNNING(0) ) { // reschedule
4.138 + TMU_timers[0].timer_run = sh4r.slice_cycle;
4.139 + TMU_schedule_timer( 0 );
4.140 + }
4.141 + return;
4.142 + case TCNT1:
4.143 + MMIO_WRITE( TMU, reg, val );
4.144 + if( TMU_IS_RUNNING(1) ) { // reschedule
4.145 + TMU_timers[1].timer_run = sh4r.slice_cycle;
4.146 + TMU_schedule_timer( 1 );
4.147 + }
4.148 + return;
4.149 + case TCNT2:
4.150 + MMIO_WRITE( TMU, reg, val );
4.151 + if( TMU_IS_RUNNING(2) ) { // reschedule
4.152 + TMU_timers[2].timer_run = sh4r.slice_cycle;
4.153 + TMU_schedule_timer( 2 );
4.154 + }
4.155 + return;
4.156 }
4.157 MMIO_WRITE( TMU, reg, val );
4.158 }
4.159
4.160 -void TMU_run_slice( uint32_t nanosecs )
4.161 +void TMU_count_all( uint32_t nanosecs )
4.162 {
4.163 int tcr = MMIO_READ( TMU, TSTR );
4.164 if( tcr & 0x01 ) {
4.165 TMU_count( 0, nanosecs );
4.166 - TMU_timers[0].timer_run = 0;
4.167 }
4.168 if( tcr & 0x02 ) {
4.169 TMU_count( 1, nanosecs );
4.170 - TMU_timers[1].timer_run = 0;
4.171 }
4.172 if( tcr & 0x04 ) {
4.173 TMU_count( 2, nanosecs );
4.174 - TMU_timers[2].timer_run = 0;
4.175 }
4.176 }
4.177
4.178 +void TMU_run_slice( uint32_t nanosecs )
4.179 +{
4.180 + TMU_count_all( nanosecs );
4.181 + TMU_timers[0].timer_run = 0;
4.182 + TMU_timers[1].timer_run = 0;
4.183 + TMU_timers[2].timer_run = 0;
4.184 +}
4.185 +
4.186 void TMU_update_clocks()
4.187 {
4.188 TMU_set_timer_control( 0, MMIO_READ( TMU, TCR0 ) );
.