revision 1187:266e7a1bae90
summary |
tree |
shortlog |
changelog |
graph |
changeset |
raw | bz2 | zip | gz changeset | 1187:266e7a1bae90 |
parent | 1186:2dc47c67bb93 |
child | 1188:1cc9bb0b3848 |
author | nkeynes |
date | Tue Nov 29 17:52:43 2011 +1000 (12 years ago) |
Refactor event processing into sh4_handle_pending_events()
src/eventq.c | view | annotate | diff | log | ||
src/sh4/intc.c | view | annotate | diff | log | ||
src/sh4/sh4.c | view | annotate | diff | log | ||
src/sh4/sh4.h | view | annotate | diff | log | ||
src/sh4/sh4core.in | view | annotate | diff | log | ||
src/sh4/sh4trans.c | view | annotate | diff | log |
1.1 --- a/src/eventq.c Tue Nov 29 17:11:40 2011 +10001.2 +++ b/src/eventq.c Tue Nov 29 17:52:43 2011 +10001.3 @@ -60,12 +60,12 @@1.4 {1.5 if( event_head == NULL ) {1.6 if( !(sh4r.event_types & PENDING_IRQ) ) {1.7 - sh4r.event_pending = NOT_SCHEDULED;1.8 + sh4_set_event_pending(NOT_SCHEDULED);1.9 }1.10 sh4r.event_types &= (~PENDING_EVENT);1.11 } else {1.12 if( !(sh4r.event_types & PENDING_IRQ) ) {1.13 - sh4r.event_pending = event_head->nanosecs;1.14 + sh4_set_event_pending(event_head->nanosecs);1.15 }1.16 sh4r.event_types |= PENDING_EVENT;1.17 }
2.1 --- a/src/sh4/intc.c Tue Nov 29 17:11:40 2011 +10002.2 +++ b/src/sh4/intc.c Tue Nov 29 17:52:43 2011 +10002.3 @@ -110,7 +110,7 @@2.4 intc_state.num_pending = 0;2.5 for( i=0; i<INT_NUM_SOURCES; i++ )2.6 intc_state.priority[i] = intc_default_priority[i];2.7 - sh4r.event_pending = event_get_next_time();2.8 + sh4_set_event_pending( event_get_next_time() );2.9 sh4r.event_types &= (~PENDING_IRQ);2.10 }2.12 @@ -154,7 +154,7 @@2.13 intc_state.pending[i] = which;2.15 if( i == intc_state.num_pending && (sh4r.sr&SR_BL)==0 && SH4_INTMASK() < pri ) {2.16 - sh4r.event_pending = 0;2.17 + sh4_set_event_pending(0);2.18 sh4r.event_types |= PENDING_IRQ;2.19 }2.21 @@ -189,11 +189,11 @@2.22 {2.23 if( intc_state.num_pending > 0 && (sh4r.sr&SR_BL)==0 &&2.24 SH4_INTMASK() < PRIORITY(intc_state.pending[intc_state.num_pending-1]) ) {2.25 - sh4r.event_pending = 0;2.26 + sh4_set_event_pending(0);2.27 sh4r.event_types |= PENDING_IRQ ;2.28 }2.29 else {2.30 - sh4r.event_pending = event_get_next_time();2.31 + sh4_set_event_pending(event_get_next_time());2.32 sh4r.event_types &= (~PENDING_IRQ);2.33 }2.34 }
3.1 --- a/src/sh4/sh4.c Tue Nov 29 17:11:40 2011 +10003.2 +++ b/src/sh4/sh4.c Tue Nov 29 17:52:43 2011 +10003.3 @@ -403,6 +403,11 @@3.4 sh4r.new_pc = pc+2;3.5 }3.7 +void sh4_set_event_pending( uint32_t cycles )3.8 +{3.9 + sh4r.event_pending = cycles;3.10 +}3.11 +3.12 /**3.13 * Dump all SH4 core information for crash-dump purposes3.14 */3.15 @@ -664,6 +669,16 @@3.16 return addr != MMU_VMA_ERROR && mem_has_page(addr);3.17 }3.19 +void sh4_handle_pending_events() {3.20 + if( sh4r.event_types & PENDING_EVENT ) {3.21 + event_execute();3.22 + }3.23 + /* Eventq execute may (quite likely) deliver an immediate IRQ */3.24 + if( sh4r.event_types & PENDING_IRQ ) {3.25 + sh4_accept_interrupt();3.26 + }3.27 +}3.28 +3.29 /**3.30 * Go through ext_address_space page by page3.31 */
4.1 --- a/src/sh4/sh4.h Tue Nov 29 17:11:40 2011 +10004.2 +++ b/src/sh4/sh4.h Tue Nov 29 17:52:43 2011 +10004.3 @@ -124,6 +124,17 @@4.4 void sh4_set_pc( int pc );4.6 /**4.7 + * Set the time of the next pending event within the current timeslice.4.8 + */4.9 +void sh4_set_event_pending( uint32_t cycles );4.10 +4.11 +/**4.12 + * Handle an event that's due (note caller is responsible for ensuring that the4.13 + * event is in fact due).4.14 + */4.15 +void sh4_handle_pending_events();4.16 +4.17 /**4.18 * Execute (using the emulator) a single instruction (in other words, perform a4.19 * single-step operation).4.20 */
5.1 --- a/src/sh4/sh4core.in Tue Nov 29 17:11:40 2011 +10005.2 +++ b/src/sh4/sh4core.in Tue Nov 29 17:52:43 2011 +10005.3 @@ -47,13 +47,7 @@5.4 if( sh4_breakpoint_count == 0 ) {5.5 for( ; sh4r.slice_cycle < nanosecs; sh4r.slice_cycle += sh4_cpu_period ) {5.6 if( SH4_EVENT_PENDING() ) {5.7 - if( sh4r.event_types & PENDING_EVENT ) {5.8 - event_execute();5.9 - }5.10 - /* Eventq execute may (quite likely) deliver an immediate IRQ */5.11 - if( sh4r.event_types & PENDING_IRQ ) {5.12 - sh4_accept_interrupt();5.13 - }5.14 + sh4_handle_pending_events();5.15 }5.16 if( !sh4_execute_instruction() ) {5.17 break;5.18 @@ -62,13 +56,7 @@5.19 } else {5.20 for( ;sh4r.slice_cycle < nanosecs; sh4r.slice_cycle += sh4_cpu_period ) {5.21 if( SH4_EVENT_PENDING() ) {5.22 - if( sh4r.event_types & PENDING_EVENT ) {5.23 - event_execute();5.24 - }5.25 - /* Eventq execute may (quite likely) deliver an immediate IRQ */5.26 - if( sh4r.event_types & PENDING_IRQ ) {5.27 - sh4_accept_interrupt();5.28 - }5.29 + sh4_handle_pending_events();5.30 }5.32 if( !sh4_execute_instruction() )
6.1 --- a/src/sh4/sh4trans.c Tue Nov 29 17:11:40 2011 +10006.2 +++ b/src/sh4/sh4trans.c Tue Nov 29 17:52:43 2011 +10006.3 @@ -34,18 +34,10 @@6.4 */6.5 uint32_t sh4_translate_run_slice( uint32_t nanosecs )6.6 {6.7 - void * (*code)() = NULL;6.8 event_schedule( EVENT_ENDTIMESLICE, nanosecs );6.9 for(;;) {6.10 if( sh4r.event_pending <= sh4r.slice_cycle ) {6.11 - if( sh4r.event_types & PENDING_EVENT ) {6.12 - event_execute();6.13 - }6.14 - /* Eventq execute may (quite likely) deliver an immediate IRQ */6.15 - if( sh4r.event_types & PENDING_IRQ ) {6.16 - sh4_accept_interrupt();6.17 - code = NULL;6.18 - }6.19 + sh4_handle_pending_events();6.20 if( sh4r.slice_cycle >= nanosecs )6.21 return nanosecs;6.22 }6.23 @@ -57,7 +49,7 @@6.24 syscall_invoke( pc );6.25 }6.27 - code = xlat_get_code_by_vma( sh4r.pc );6.28 + void * (*code)() = xlat_get_code_by_vma( sh4r.pc );6.29 if( code != NULL ) {6.30 while( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {6.31 code = XLAT_BLOCK_CHAIN(code);
.